Purpose
This statement creates a new table in the database.
Syntax
CREATE [hint_options] [TEMPORARY] TABLE [IF NOT EXISTS] table_name
(table_definition_list) [table_option_list] [partition_option] [[MERGE_ENGINE = {delete_insert | partial_update}] table_column_group_option] [IGNORE | REPLACE] [AS] select;
CREATE [TEMPORARY] TABLE [IF NOT EXISTS] table_name
LIKE table_name;
table_definition_list:
table_definition [, table_definition ...]
table_definition:
column_definition_list
| [CONSTRAINT [constraint_name]] PRIMARY KEY index_desc
| [CONSTRAINT [constraint_name]] UNIQUE {INDEX | KEY}
[index_name] index_desc
| [CONSTRAINT [constraint_name]] FOREIGN KEY
[index_name] index_desc
REFERENCES reference_definition
[match_action][opt_reference_option_list]
| [FULLTEXT] {INDEX | KEY} [index_name] [index_type] (key_part,...) [WITH PARSER tokenizer_option] [PARSER_PROPERTIES[=](parser_properties_list)]
[index_option_list] [index_column_group_option]
| index_json_clause
| [CONSTRAINT [constraint_name]] CHECK(expression) constraint_state
column_definition_list:
column_definition [, column_definition ...]
column_definition:
column_name data_type
[DEFAULT const_value] [AUTO_INCREMENT]
[NULL | NOT NULL] [[PRIMARY] KEY] [UNIQUE [KEY]] [COMMENT string_value] [SKIP_INDEX(skip_index_option_list)]
| column_name data_type
[GENERATED ALWAYS] AS (expr) [VIRTUAL | STORED]
[opt_generated_column_attribute]
skip_index_option_list:
skip_index_option [,skip_index_option ...]
skip_index_option:
MIN_MAX
| SUM
index_desc:
(column_desc_list) [index_type] [index_option_list]
match_action:
MATCH {SIMPLE | FULL | PARTIAL}
opt_reference_option_list:
reference_option [,reference_option ...]
reference_option:
ON {DELETE | UPDATE} {RESTRICT | CASCADE | SET NULL | NO ACTION | SET DEFAULT}
tokenizer_option:
SPACE
| NGRAM
| BENG
| IK
| NGRAM2
parser_properties_list:
parser_properties [, parser_properties ...]
parser_properties:
min_token_size = int_value
| max_token_size = int_value
| ngram_token_size = int_value
| ik_mode = 'char_value'
| min_ngram_size = int_value
| max_ngram_size = int_value
key_part:
{index_col_name [(length)] | (expr)} [ASC | DESC]
index_type:
USING BTREE
index_option_list:
index_option [ index_option ...]
index_option:
[GLOBAL | LOCAL]
| block_size
| compression
| STORING(column_name_list)
| COMMENT string_value
table_option_list:
table_option [ table_option ...]
table_option:
[DEFAULT] {CHARSET | CHARACTER SET} [=] charset_name
| [DEFAULT] COLLATE [=] collation_name
| table_tablegroup
| block_size
| lob_inrow_threshold [=] num
| compression
| AUTO_INCREMENT [=] INT_VALUE
| COMMENT string_value
| ROW_FORMAT [=] REDUNDANT|COMPACT|DYNAMIC|COMPRESSED|DEFAULT
| PCTFREE [=] num
| parallel_clause
| DUPLICATE_SCOPE [=] 'none|cluster'
| TABLE_MODE [=] 'table_mode_value'
| auto_increment_cache_size [=] INT_VALUE
| READ {ONLY | WRITE}
| ORGANIZATION [=] {INDEX | HEAP}
| enable_macro_block_bloom_filter [=] {True | False}
| DYNAMIC_PARTITION_POLICY [=] (dynamic_partition_policy_list)
| SEMISTRUCT_ENCODING_TYPE [=] 'encoding'
parallel_clause:
{NOPARALLEL | PARALLEL integer}
table_mode_value:
NORMAL
| QUEUING
| MODERATE
| SUPER
| EXTREME
dynamic_partition_policy_list:
dynamic_partition_policy_option [, dynamic_partition_policy_option ...]
dynamic_partition_policy_option:
ENABLE = {true | false}
| TIME_UNIT = {'hour' | 'day' | 'week' | 'month' | 'year'}
| PRECREATE_TIME = {'-1' | '0' | 'n {hour | day | week | month | year}'}
| EXPIRE_TIME = {'-1' | '0' | 'n {hour | day | week | month | year}'}
| TIME_ZONE = {'default' | 'time_zone'}
| BIGINT_PRECISION = {'none' | 'us' | 'ms' | 's'}
partition_option:
PARTITION BY HASH(expression)
[subpartition_option] PARTITIONS partition_count
| PARTITION BY KEY([column_name_list])
[subpartition_option] PARTITIONS partition_count
| PARTITION BY RANGE {(expression) | COLUMNS (column_name_list)}
[subpartition_option] (range_partition_list)
| PARTITION BY LIST {(expression) | COLUMNS (column_name_list)}
[subpartition_option] PARTITIONS partition_count
| PARTITION BY RANGE [COLUMNS]([column_name_list]) [SIZE('size_value')] (range_partition_list)
subpartition_option:
SUBPARTITION BY HASH(expression)
SUBPARTITIONS subpartition_count
| SUBPARTITION BY KEY(column_name_list)
SUBPARTITIONS subpartition_count
| SUBPARTITION BY RANGE {(expression) | COLUMNS (column_name_list)}
(range_subpartition_list)
| SUBPARTITION BY LIST(expression)
range_partition_list:
range_partition [, range_partition ...]
range_partition:
PARTITION partition_name
VALUES LESS THAN {(expression_list) | MAXVALUE}
range_subpartition_list:
range_subpartition [, range_subpartition ...]
range_subpartition:
SUBPARTITION subpartition_name
VALUES LESS THAN {(expression_list) | MAXVALUE}
expression_list:
expression [, expression ...]
column_name_list:
column_name [, column_name ...]
partition_name_list:
partition_name [, partition_name ...]
partition_count | subpartition_count:
INT_VALUE
table_column_group_option/index_column_group_option:
WITH COLUMN GROUP(all columns)
| WITH COLUMN GROUP(each column)
| WITH COLUMN GROUP(all columns, each column)
index_json_clause:
[UNIQUE] INDEX idx_json_name((CAST(json_column_name->'$.json_field_name' AS UNSIGNED ARRAY)))
| INDEX idx_json_name(column_name, [column_name, ...] (CAST(json_column_name->'$.json_field_name' AS CHAR(n) ARRAY)))
Parameters
| Parameter | Description |
|---|---|
| hint_options | Optional. Specifies the hint options. You can manually specify the bypass import hint, including APPEND, DIRECT, and NO_DIRECT. The corresponding hint format is /*+ [APPEND | DIRECT(need_sort,max_error,load_type)] parallel(N) |NO_DIRECT */. For more information about bypass importing data by using the CREATE TABLE AS SELECT statement, see the Use the CREATE TABLE AS SELECT statement to bypass import data section in Full bypass import. |
| TEMPORARY | Optional. Indicates that the table is created as a temporary table. Note: This parameter is supported starting from OceanBase Database V4.3.5 BP4. |
| IF NOT EXISTS | If you specify IF NOT EXISTS, no error is returned even if the table to be created already exists. If you do not specify this option and the table already exists, an error is returned. See the example after this table. |
| IGNORE | REPLACE | Optional. Specifies how to handle rows with duplicate unique key values when you use the CREATE TABLE ... AS SELECT statement. If you specify neither IGNORE nor REPLACE, an error is returned when rows with duplicate unique key values exist. See the example after this table. |
| PRIMARY KEY | Specifies the primary key of the created table. If you do not specify a primary key, a hidden primary key will be used. OceanBase Database allows you to modify the primary key of a table or add a primary key to a table by using the ALTER TABLE statement. For more information, see ALTER TABLE. |
| FOREIGN KEY | Specifies the foreign key of the created table. If you do not specify a foreign key name, the name is generated by combining the table name, OBFK, and the creation time. (For example, the foreign key created for the t1 table on August 1, 2021, 00:00:00 is named t1_OBFK_1627747200000000.) A foreign key allows cross-table references to related data. When an UPDATE or DELETE operation affects key values in the parent table that match rows in the child table, the result depends on the reference option specified in the ON UPDATE and ON DELETE clauses (see reference_option in the syntax above). The SET DEFAULT operation is supported. |
| FULLTEXT | Optional. Specifies whether to create a full-text index. For more information about creating a full-text index, see the Create a full-text index section in Create an index. Notice: Only local full-text indexes are supported in the current version. |
| WITH PARSER tokenizer_option | Optional. Specifies the tokenizer for the full-text index. For more information, see tokenizer_option below. |
| PARSER_PROPERTIES[=](parser_properties_list) | Optional. Specifies the properties of the tokenizer. For more information, see parser_properties below. Note: Starting from OceanBase Database V4.3.5 BP1, you can specify the properties of the tokenizer. |
| KEY | INDEX | Specifies a key or index of the created table. If you do not specify an index name, the name of the first indexed column is used as the index name. If that name is already taken, an underscore (_) and a sequence number are appended. (For example, if you create an index on the c1 column and the name c1 is already taken, the index is named c1_2.) You can execute the SHOW INDEX statement to view the indexes on a table. |
| key_part | Specifies an index key, which can be a column or an expression (for a function-based index). |
| index_col_name | Specifies the column name of the index. ASC (ascending) is supported after each column name, but DESC (descending) is not supported. The default is ascending. The sorting method for creating indexes is as follows: first, sort by the value of the first column in index_col_name; then, for records with the same value in this column, sort by the value of the next column name; and so on. |
| expr | Specifies a valid function index expression, which can be a Boolean expression, for example, c1=c1. Notice: OceanBase Database does not allow you to create function indexes on generated columns. |
| ROW_FORMAT | Specifies whether to enable the encoding storage format. Valid values are REDUNDANT, COMPACT, DYNAMIC, COMPRESSED, and DEFAULT, as listed in the table_option syntax above. |
| [GENERATED ALWAYS] AS (expr) [VIRTUAL | STORED] | Creates a generated column. expr specifies the expression used to calculate the column value. VIRTUAL indicates that the value is computed when the column is read and is not stored; STORED indicates that the value is computed and stored when the row is inserted or updated. See the example after this table. |
| BLOCK_SIZE | Specifies the size of microblocks in the table. |
| lob_inrow_threshold | Specifies the INROW threshold for LOB fields. When the data size of a LOB field exceeds this threshold, the excess data will be stored in the LOB Meta table as OUTROW. The default value is controlled by the variable ob_default_lob_inrow_threshold. |
| COMPRESSION | Specifies the compression algorithm for the table, for example, 'zstd_1.0' (see the examples below). |
| CHARSET | CHARACTER SET | Specifies the default character set for columns in the table. For more information about supported character sets, see Character sets. |
| COLLATE | Specifies the default collation for columns in the table. For more information about supported collations, see Collations. |
| table_tablegroup | Specifies the tablegroup to which the table belongs. |
| AUTO_INCREMENT | Specifies the initial value for the auto-increment column in the table. OceanBase Database supports using an auto-increment column as a partitioning key. |
| COMMENT | The comment. The comment is not case-sensitive. |
| PCTFREE | Specifies the percentage of space reserved in a macroblock. |
| parallel_clause | Specifies the degree of parallelism (DOP) at the table level. NOPARALLEL sets the DOP to 1, which is the default. PARALLEL integer sets the DOP to the specified integer, which must be greater than or equal to 1. |
| DUPLICATE_SCOPE | Specifies the replication scope of the table. Valid values: none, the default value, indicates an ordinary table; cluster indicates a replicated table whose replicas are distributed at the cluster level. |
| CHECK | Limits the range of values allowed in a column. You can view information about CHECK constraints by using the SHOW CREATE TABLE command, the information_schema.TABLE_CONSTRAINTS view, or the information_schema.CHECK_CONSTRAINTS view. |
| constraint_name | The name of the constraint, which can be up to 64 characters in length. |
| expression | The constraint expression. |
| MERGE_ENGINE = {delete_insert | partial_update} | Optional. Specifies the update model for the table. Valid values are delete_insert and partial_update. See the example after this table. |
| table_column_group_option/index_column_group_option | Specifies the columnar storage options for the table or index. WITH COLUMN GROUP(all columns) stores all columns in a single column group (rowstore); WITH COLUMN GROUP(each column) stores each column in its own column group (columnstore); WITH COLUMN GROUP(all columns, each column) stores the data in both formats. |
| SKIP_INDEX | Specifies the Skip Index attribute of a column. Valid values: MIN_MAX maintains the minimum value, maximum value, and null count of the column; SUM maintains the sum of the column values and applies only to numeric columns. |
| index_json_clause | Specifies the clause for creating a multi-valued index. For more information about multi-valued indexes, see the Create a multi-valued index section in Create an index. See also the example after this table. Notice: The multi-valued index feature is currently in the experimental stage and is not recommended for use in production environments, to avoid affecting system stability. |
| json_column_name | Specifies the name of the column with the JSON data type in the table. |
| idx_json_name | Specifies the name of the multi-valued index to be created. |
| json_field_name | Specifies the name of the field in the JSON column that needs to be indexed. |
| TABLE_MODE | Optional. Specifies the merge trigger threshold and merge strategy, which control the behavior of data compaction after data is dumped. For more information about the values, see table_mode_value below. |
| auto_increment_cache_size | Optional. Specifies the number of cached auto-increment values. The default value is 0, indicating that this parameter is not configured. In this case, the system uses the tenant-level configuration item auto_increment_cache_size as the cache size for auto-increment columns. |
| READ {ONLY | WRITE} | Specifies the read/write permissions for the table. READ ONLY indicates that the table can only be read; READ WRITE indicates that the table can be both read and written, which is the default. See the example at the end of the Examples section. |
| ORGANIZATION [=] {INDEX | HEAP} | Optional. Specifies the storage order of data rows in the table, that is, the table organization mode. INDEX creates an index-organized table, in which rows are stored in primary key order; HEAP creates a heap-organized table, in which rows are not stored in a particular order. If the ORGANIZATION option is not specified, its value is the same as the value of the configuration item default_table_organization. See the example at the end of the Examples section. Note: For OceanBase Database V4.3.5, the … |
| enable_macro_block_bloom_filter [=] {True | False} | Specifies whether to persist the bloom filter at the macroblock level. True enables persistence of the macroblock-level bloom filter; False disables it. Note: For OceanBase Database V4.3.5, the … |
| DYNAMIC_PARTITION_POLICY [=] (dynamic_partition_policy_list) | Specifies the dynamic partition management attribute for the table, enabling automatic creation and deletion of partitions. dynamic_partition_policy_list is a comma-separated list of configurable parameters for the dynamic partition strategy. For more information, see dynamic_partition_policy_option below. Note: For OceanBase Database V4.3.5, specifying the dynamic partition management attribute for a table is supported starting from V4.3.5 BP2. |
| PARTITION BY RANGE [COLUMNS]([column_name_list]) [SIZE('size_value')] | Specifies the creation of an auto-partitioned table. For more information, see the automatic partitioning syntax when creating a table in Automatic partition splitting. |
| SEMISTRUCT_ENCODING_TYPE | Optional. Specifies the encoding type for semi-structured data. |
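Here is a minimal illustrative example of the TEMPORARY and IF NOT EXISTS options; the table and column names are placeholders:
-- No error is returned if a table named tbl_exists already exists.
CREATE TABLE IF NOT EXISTS tbl_exists (c1 INT PRIMARY KEY, c2 VARCHAR(50));
-- Create a temporary table (supported starting from V4.3.5 BP4).
CREATE TEMPORARY TABLE tmp_t1 (c1 INT, c2 VARCHAR(50));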
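Here is an illustrative sketch of IGNORE and REPLACE with CREATE TABLE ... AS SELECT, assuming a source table src whose column c1 contains duplicate values:
-- With REPLACE, rows with duplicate unique key values are expected to replace existing rows.
CREATE TABLE dst_replace (c1 INT PRIMARY KEY, c2 VARCHAR(20)) REPLACE AS SELECT c1, c2 FROM src;
-- With IGNORE, rows with duplicate unique key values are expected to be skipped rather than causing an error.
CREATE TABLE dst_ignore (c1 INT PRIMARY KEY, c2 VARCHAR(20)) IGNORE AS SELECT c1, c2 FROM src;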
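Here is a minimal sketch of a generated column; the column names and expression are illustrative:
-- c2 is computed from c1 when it is read (VIRTUAL); use STORED to persist the computed value.
CREATE TABLE tbl_gen (c1 INT, c2 INT GENERATED ALWAYS AS (c1 + 1) VIRTUAL);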
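Here is a sketch of specifying the update model, following the position shown in the syntax above; the table name is a placeholder:
CREATE TABLE tbl_merge (c1 INT PRIMARY KEY, c2 INT)
MERGE_ENGINE = delete_insert WITH COLUMN GROUP(each column);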
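Here is a sketch of the index_json_clause syntax; the table, column, and JSON field names are illustrative:
CREATE TABLE tbl_json (
  id INT PRIMARY KEY,
  doc JSON,
  INDEX idx_tags ((CAST(doc->'$.tags' AS UNSIGNED ARRAY)))
);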
table_mode_value
Note: Among the listed TABLE_MODE modes, all modes except NORMAL represent queuing tables. QUEUING is the most basic queuing mode, and the modes that follow it represent increasingly aggressive compaction strategies.

- NORMAL: The default value. This mode indicates normal operation. In this mode, the probability of triggering a major compaction after a data dump is extremely low.
- QUEUING: In this mode, the probability of triggering a major compaction after a data dump is low.
- MODERATE: This mode indicates moderate operation. In this mode, the probability of triggering a major compaction after a data dump is moderate.
- SUPER: This mode indicates super operation. In this mode, the probability of triggering a major compaction after a data dump is high.
- EXTREME: This mode indicates extreme operation. In this mode, the probability of triggering a major compaction after a data dump is high.
For more information about major compactions, see Adaptive major compaction.
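Here is a minimal illustrative example of setting TABLE_MODE; the table name is a placeholder:
CREATE TABLE tbl_queue (c1 INT PRIMARY KEY, c2 INT) TABLE_MODE = 'QUEUING';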
tokenizer_option
- SPACE: The default value. The tokenizer splits text by spaces. You can specify the following properties: min_token_size (value range [1, 16]) and max_token_size (value range [10, 84]).
- NGRAM: The tokenizer splits text by N-grams (Chinese). You can specify the following property: ngram_token_size (value range [1, 10]).
- NGRAM2: The tokenizer splits text into consecutive characters of a length between min_ngram_size and max_ngram_size. You can specify the following properties: min_ngram_size (value range [1, 16]) and max_ngram_size (value range [1, 16]). Note: For OceanBase Database V4.3.5, the NGRAM2 tokenizer is supported starting from V4.3.5 BP2.
- BENG: The tokenizer splits text by basic English. You can specify the following properties: min_token_size (value range [1, 16]) and max_token_size (value range [10, 84]).
- IK: The tokenizer splits text by IK (Chinese). Only the utf-8 character set is supported. You can specify the following property: ik_mode (valid values: smart and max_word). Note: For OceanBase Database V4.3.5, the IK tokenizer is supported starting from V4.3.5 BP1.
parser_properties
- min_token_size: The minimum token length. Default value: 3. Value range: [1, 16].
- max_token_size: The maximum token length. Default value: 84. Value range: [10, 84].
- ngram_token_size: The token length for the NGRAM tokenizer. This property is valid only for the NGRAM tokenizer. Default value: 2. Value range: [1, 10].
- ik_mode: The tokenization mode for the IK tokenizer. Valid values: smart, the default value, uses the dictionary to improve tokenization accuracy; the boundaries of dictionary words are prioritized, which may reduce unnecessary extensions. max_word identifies the words defined in the dictionary but does not affect the maximum extension of tokenization; even if the dictionary defines words, the max_word mode still tries to split the text into more words.
- min_ngram_size: The minimum token length for the NGRAM2 tokenizer. Value range: [1, 16].
- max_ngram_size: The maximum token length for the NGRAM2 tokenizer. Value range: [1, 16].
Here is an example:
CREATE TABLE tbl1 (col1 VARCHAR(200), col2 TEXT,
FULLTEXT INDEX fidx(col2) WITH PARSER ik PARSER_PROPERTIES=(ik_mode='max_word'));
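Here is a similar illustrative sketch for the NGRAM2 tokenizer (V4.3.5 BP2 or later); the table and index names are placeholders:
CREATE TABLE tbl_ft (id INT PRIMARY KEY, body TEXT,
    FULLTEXT INDEX fidx_body(body) WITH PARSER NGRAM2 PARSER_PROPERTIES=(min_ngram_size=2, max_ngram_size=3));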
dynamic_partition_policy_option
- ENABLE = {true | false}: Optional. Specifies whether to enable dynamic partitioning. You can modify this option. Valid values: true, the default value, indicates that dynamic partitioning is enabled; false indicates that dynamic partitioning is disabled.
- TIME_UNIT = {'hour' | 'day' | 'week' | 'month' | 'year'}: Required. Specifies the time unit for partitioning. You cannot modify this option. Valid values: hour, day, week, month, and year, indicating that partitions are created by hour, day, week, month, or year, respectively.
- PRECREATE_TIME = {'-1' | '0' | 'n {hour | day | week | month | year}'}: Optional. Specifies how far ahead partitions are pre-created. When dynamic partitioning is scheduled, partitions are pre-created to ensure that the maximum partition upper bound is greater than now() + precreate_time. You can modify this option. Valid values: -1, the default value, indicates that no partitions are pre-created; 0 indicates that only the current partition is pre-created; n {hour | day | week | month | year} indicates the time span for pre-creating partitions. For example, 3 hour indicates that partitions are pre-created for the next 3 hours. Note: When multiple partitions are pre-created, the partition boundary interval is TIME_UNIT; the boundary of the first pre-created partition is the ceiling of the current maximum partition boundary based on TIME_UNIT.
- EXPIRE_TIME = {'-1' | '0' | 'n {hour | day | week | month | year}'}: Optional. Specifies when partitions expire. When dynamic partitioning is scheduled, all partitions with an upper bound less than now() - expire_time are deleted. You can modify this option. Valid values: -1, the default value, indicates that partitions do not expire; 0 indicates that all partitions except the current one expire; n {hour | day | week | month | year} indicates the time after which partitions expire. For example, 1 day indicates that partitions expire after 1 day.
- TIME_ZONE = {'default' | 'time_zone'}: Optional. Specifies the time zone used to determine the current time and the partition key values of the date, datetime, and year types. You cannot modify this option. Valid values: default, the default value, indicates that the tenant time zone is used (for other partition key types, this field must be default); time_zone indicates a custom time zone offset, for example, +8:00.
- BIGINT_PRECISION = {'none' | 'us' | 'ms' | 's'}: Optional. Specifies the timestamp precision of a bigint-type partition key. You cannot modify this option. Valid values: none, the default value, indicates that no precision is specified (the partition key is not of the bigint type); us indicates microsecond precision; ms indicates millisecond precision; s indicates second precision.
For more information about how to create a dynamic partition table, see Create a dynamic partition table.
Here is an example:
CREATE TABLE tbl2 (col1 INT, col2 DATETIME)
DYNAMIC_PARTITION_POLICY(
ENABLE = true,
TIME_UNIT = 'hour',
PRECREATE_TIME = '3 hour',
EXPIRE_TIME = '1 day',
TIME_ZONE = '+8:00',
BIGINT_PRECISION = 'none')
PARTITION BY RANGE COLUMNS (col2)(
PARTITION P0 VALUES LESS THAN ('2024-11-11 13:30:00')
);
Examples
Create a database table.
obclient> CREATE TABLE tbl1 (c1 INT PRIMARY KEY, c2 VARCHAR(50));
Query OK, 0 rows affected

Create a table with an index.
obclient> CREATE TABLE tbl2 (c1 INT PRIMARY KEY, c2 INT, c3 INT, INDEX i1 (c2));
Query OK, 0 rows affected

Create a table
tbl3with a function-based index.obclient> CREATE TABLE tbl3 (c1 INT, c2 INT, INDEX i1 ((c1+1)), UNIQUE KEY ((c1+c2))); Query OK, 0 rows affected obclient> SHOW CREATE TABLE tbl3; +-------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table | Create Table | +-------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | tbl3 | CREATE TABLE `tbl3` ( `c1` int(11) DEFAULT NULL, `c2` int(11) DEFAULT NULL, UNIQUE KEY `functional_index` ((`c1` + `c2`)) BLOCK_SIZE 16384 LOCAL, KEY `i1` ((`c1` + 1)) BLOCK_SIZE 16384 LOCAL ) DEFAULT CHARSET = utf8mb4 ROW_FORMAT = DYNAMIC COMPRESSION = 'zstd_1.3.8' REPLICA_NUM = 2 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 0 | +-------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+Create a table with 8 hash partitions.
obclient> CREATE TABLE tbl4 (c1 INT PRIMARY KEY, c2 INT) PARTITION BY HASH(c1) PARTITIONS 8;
Query OK, 0 rows affected

Create a table with a range partition as the primary partition and a key partition as the subpartition.
obclient> CREATE TABLE tbl5 (c1 INT, c2 INT, c3 INT) PARTITION BY RANGE(c1) SUBPARTITION BY KEY(c2, c3) SUBPARTITIONS 5 (PARTITION p0 VALUES LESS THAN(0), PARTITION p1 VALUES LESS THAN(100));
Query OK, 0 rows affected

Create a table with two columns: one using the gbk character set and the other using the utf8 character set.
gbkand another of typeutf8.obclient> CREATE TABLE tbl6 (c1 VARCHAR(10), c2 VARCHAR(10) CHARSET GBK COLLATE gbk_bin) DEFAULT CHARSET utf8 COLLATE utf8mb4_general_ci; Query OK, 0 rows affectedEnable encoding and use
zstdcompression, with 5% space reserved for macroblocks.obclient> CREATE TABLE tbl7 (c1 INT, c2 INT, c3 VARCHAR(64)) COMPRESSION 'zstd_1.0' ROW_FORMAT DYNAMIC PCTFREE 5; Query OK, 0 rows affectedCreate a table
tbl8and set the parallelism to3.obclient> CREATE TABLE tbl8(c1 INT PRIMARY KEY, c2 INT) PARALLEL 3; Query OK, 0 rows affectedUse an auto-increment column as the partitioning key.
obclient> CREATE TABLE tbl9(inv_id BIGINT NOT NULL AUTO_INCREMENT, c1 BIGINT, PRIMARY KEY (inv_id)) PARTITION BY HASH(inv_id) PARTITIONS 8;
Query OK, 0 rows affected

Specify a foreign key for the table
ref_t2. When an UPDATE operation affects the key values in the parent table that match the rows in the child table, execute the SET NULL operation.

obclient> CREATE TABLE ref_t1(c1 INT PRIMARY KEY, C2 INT);
Query OK, 0 rows affected

obclient> CREATE TABLE ref_t2(c1 INT PRIMARY KEY, C2 INT, FOREIGN KEY(c2) REFERENCES ref_t1(c1) ON UPDATE SET NULL);
Query OK, 0 rows affected

Create a table
tbl10with aCHECKconstraint and view the constraint information.obclient> CREATE TABLE tbl10 (col1 INT, col2 INT, col3 INT, CONSTRAINT equal_check1 CHECK(col1 = col3 * 2)); Query OK, 0 rows affected obclient> SHOW CREATE TABLE tbl10; +-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table | Create Table | +-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | tbl10 | CREATE TABLE `tbl10` ( `col1` int(11) DEFAULT NULL, `col2` int(11) DEFAULT NULL, `col3` int(11) DEFAULT NULL, CONSTRAINT `equal_check1` CHECK ((`col1` = (`col3` * 2))) ) DEFAULT CHARSET = utf8mb4 ROW_FORMAT = DYNAMIC COMPRESSION = 'zstd_1.3.8' REPLICA_NUM = 1 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 0 | +-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ 1 row in setCreate a table
tbl11based on the tabletbl10and view the table information.obclient> CREATE TABLE tbl11 LIKE tbl10; Query OK, 0 rows affected obclient> SHOW CREATE TABLE tbl11; +-------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | Table | Create Table | +-------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | tbl11 | CREATE TABLE `tbl11` ( `col1` int(11) DEFAULT NULL, `col2` int(11) DEFAULT NULL, `col3` int(11) DEFAULT NULL, CONSTRAINT `tbl11_OBCHECK_1650793233327894` CHECK ((`col1` = (`col3` * 2))) ) DEFAULT CHARSET = utf8mb4 ROW_FORMAT = DYNAMIC COMPRESSION = 'zstd_1.3.8' REPLICA_NUM = 1 BLOCK_SIZE = 16384 USE_BLOOM_FILTER = FALSE TABLET_SIZE = 134217728 PCTFREE = 0 | +-------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ 1 row in setCreate a replicated table
dup_t1 at the cluster level.

Log in to the sys tenant and create a unit.
obclient> CREATE RESOURCE UNIT IF NOT EXISTS 2c5g MAX_CPU 2, MEMORY_SIZE '5G';
Query OK, 0 rows affected

Create a resource pool with a unit number of 2.
obclient> CREATE RESOURCE POOL tenant_pool UNIT = '2c5g', UNIT_NUM = 2, ZONE_LIST = ('z1', 'z2', 'z3');
Query OK, 0 rows affected

Create a user tenant
obmysql and specify the locality distribution.

obclient> CREATE TENANT obmysql resource_pool_list=('tenant_pool'), LOCALITY = "F@z1, F@z2, R@z3", PRIMARY_ZONE = "z1" SET ob_tcp_invited_nodes='%';
Query OK, 0 rows affected

Log in to the
obmysql user tenant created in step 3 and create a replicated table.

obclient> USE test;
Database changed

obclient> CREATE TABLE dup_t1(c1 INT) DUPLICATE_SCOPE = 'cluster';
Query OK, 0 rows affected

(Optional) View the broadcast log stream information. The replicated table will be created on this log stream.
obclient> SELECT * FROM oceanbase.DBA_OB_LS WHERE FLAG LIKE "%DUPLICATE%"; +-------+--------+--------------+---------------+-------------+---------------------+----------+---------------------+---------------------+-----------+ | LS_ID | STATUS | PRIMARY_ZONE | UNIT_GROUP_ID | LS_GROUP_ID | CREATE_SCN | DROP_SCN | SYNC_SCN | READABLE_SCN | FLAG | +-------+--------+--------------+---------------+-------------+---------------------+----------+---------------------+---------------------+-----------+ | 1003 | NORMAL | z1;z2 | 0 | 0 | 1683267390195713284 | NULL | 1683337744205408139 | 1683337744205408139 | DUPLICATE | +-------+--------+--------------+---------------+-------------+---------------------+----------+---------------------+---------------------+-----------+ 1 row in set(Optional) View the replica distribution of the replicated table under the sys tenant. The
REPLICA_TYPEfield indicates the replica type.obclient> SELECT * FROM oceanbase.CDB_OB_TABLE_LOCATIONS WHERE TABLE_NAME = "dup_t1"; +-----------+---------------+------------+----------+------------+----------------+-------------------+------------+---------------+-----------+-------+------+----------------+----------+----------+--------------+-----------------+ | TENANT_ID | DATABASE_NAME | TABLE_NAME | TABLE_ID | TABLE_TYPE | PARTITION_NAME | SUBPARTITION_NAME | INDEX_NAME | DATA_TABLE_ID | TABLET_ID | LS_ID | ZONE | SVR_IP | SVR_PORT | ROLE | REPLICA_TYPE | DUPLICATE_SCOPE | +-----------+---------------+------------+----------+------------+----------------+-------------------+------------+---------------+-----------+-------+------+----------------+----------+----------+--------------+-----------------+ | 1002 | test | dup_t1 | 500002 | USER TABLE | NULL | NULL | NULL | NULL | 200001 | 1003 | z1 | 11.xxx.xxx.xxx | 36125 | LEADER | FULL | CLUSTER | | 1002 | test | dup_t1 | 500002 | USER TABLE | NULL | NULL | NULL | NULL | 200001 | 1003 | z1 | 11.xxx.xxx.xxx | 36124 | FOLLOWER | READONLY | CLUSTER | | 1002 | test | dup_t1 | 500002 | USER TABLE | NULL | NULL | NULL | NULL | 200001 | 1003 | z2 | 11.xxx.xxx.xxx | 36127 | FOLLOWER | FULL | CLUSTER | | 1002 | test | dup_t1 | 500002 | USER TABLE | NULL | NULL | NULL | NULL | 200001 | 1003 | z2 | 11.xxx.xxx.xxx | 36126 | FOLLOWER | READONLY | CLUSTER | | 1002 | test | dup_t1 | 500002 | USER TABLE | NULL | NULL | NULL | NULL | 200001 | 1003 | z3 | 11.xxx.xxx.xxx | 36128 | FOLLOWER | READONLY | CLUSTER | | 1002 | test | dup_t1 | 500002 | USER TABLE | NULL | NULL | NULL | NULL | 200001 | 1003 | z3 | 11.xxx.xxx.xxx | 36129 | FOLLOWER | READONLY | CLUSTER | +-----------+---------------+------------+----------+------------+----------------+-------------------+------------+---------------+-----------+-------+------+----------------+----------+----------+--------------+-----------------+ 6 rows in setInsert and read/write data from the replicated table like a regular table. For a read request, if you use a proxy, the read request may be routed to any OBServer node; if you directly connect to an OBServer node, the read request will be executed on the connected OBServer node as long as the local replica is readable.
obclient> INSERT INTO dup_t1 VALUES(1);
Query OK, 1 row affected

obclient> SELECT * FROM dup_t1;
+------+
| c1   |
+------+
|    1 |
+------+
1 row in set
Create a columnstore table
tbl1_cg.

CREATE TABLE tbl1_cg (col1 INT PRIMARY KEY, col2 VARCHAR(50)) WITH COLUMN GROUP(each column);

Create a table with a columnstore index.
CREATE TABLE tbl2_cg (col1 INT PRIMARY KEY, col2 INT, col3 INT, INDEX i1 (col2) WITH COLUMN GROUP(each column));

Create a columnstore table with a columnstore index.
CREATE TABLE tbl3_cg (col1 INT PRIMARY KEY, col2 INT, col3 INT, INDEX i1 (col2) WITH COLUMN GROUP(each column)) WITH COLUMN GROUP(each column);

Create a table and specify the Skip Index attribute for the column.
CREATE TABLE test_skidx(
  col1 INT SKIP_INDEX(MIN_MAX, SUM),
  col2 FLOAT SKIP_INDEX(MIN_MAX),
  col3 VARCHAR(1024) SKIP_INDEX(MIN_MAX),
  col4 CHAR(10)
);

Create a table
tbl1 with an integer column col1, and specify that the operation uses 5 parallel threads to complete. Also, specify that the data content of the new table tbl1 will be sourced from the query results of table tbl2.

CREATE /*+ parallel(5) */ TABLE tbl1 (col1 INT) AS SELECT col1 FROM tbl2;

Create a table
tb with a persistent bloom filter at the macroblock level.

CREATE TABLE tb(c1 INT PRIMARY KEY, c2 INT) enable_macro_block_bloom_filter = True;
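The following sketches illustrate the ORGANIZATION and READ ONLY options described in the Parameters section; the table names are placeholders and command output is omitted.

-- Create a heap-organized table.
CREATE TABLE tbl_heap (c1 INT, c2 VARCHAR(20)) ORGANIZATION = HEAP;

-- Create a read-only table.
CREATE TABLE tbl_ro (c1 INT PRIMARY KEY) READ ONLY;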
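The following sketch illustrates the lob_inrow_threshold and auto_increment_cache_size options described in the Parameters section; the table name and values are placeholders.

CREATE TABLE tbl_misc (c1 INT AUTO_INCREMENT PRIMARY KEY, c2 TEXT)
  lob_inrow_threshold = 4096 auto_increment_cache_size = 1000000;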