1 change: 1 addition & 0 deletions common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -500,6 +500,7 @@ public enum ErrorMsg {
     CATALOG_NOT_EXISTS(10445, "Catalog {0} does not exist", true),
     INVALID_SCHEDULED_QUERY(10446, "Scheduled query {0} does not exist", true),
     UNSUPPORTED_TIMESTAMP_PRECISION(10447, "Unsupported value for precision: {0}", true),
+    TABLE_NOT_EXISTS(10448, "Table does not exist."),

     //========================== 20000 range starts here ========================//

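A note on usage: these constants are typically surfaced through SemanticException so the numeric code and message reach the client. A minimal hypothetical call site, not part of this diff:

    // Hypothetical call site; wraps the new constant (code 10448) in a
    // SemanticException, mirroring how sibling ErrorMsg entries are thrown.
    throw new SemanticException(ErrorMsg.TABLE_NOT_EXISTS);
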
MetastoreLock.java
@@ -78,6 +78,7 @@ public class MetastoreLock implements HiveLock {

   private final ClientPool<IMetaStoreClient, TException> metaClients;

+  private final String catalogName;
   private final String databaseName;
   private final String tableName;
   private final String fullName;
@@ -100,6 +101,7 @@ public MetastoreLock(Configuration conf, ClientPool<IMetaStoreClient, TException
       String catalogName, String databaseName, String tableName) {
     this.metaClients = metaClients;
     this.fullName = catalogName + "." + databaseName + "." + tableName;
+    this.catalogName = catalogName;
     this.databaseName = databaseName;
     this.tableName = tableName;

@@ -276,7 +278,7 @@ private LockInfo createLock() throws LockException {
     }

     LockComponent lockComponent =
-        new LockComponent(LockType.EXCL_WRITE, LockLevel.TABLE, databaseName);
+        new LockComponent(LockType.EXCL_WRITE, LockLevel.TABLE, databaseName, catalogName);
     lockComponent.setTablename(tableName);
     LockRequest lockRequest =
         new LockRequest(
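The createLock() hunk above is the heart of the change: the lock component now names its catalog explicitly. A minimal self-contained sketch of building such a request, assuming the four-argument constructor used throughout this diff (type, level, database, catalog); all literal values are illustrative, with "hive" as the catalog mirroring the tests in this PR:

    import java.util.Collections;

    import org.apache.hadoop.hive.metastore.api.DataOperationType;
    import org.apache.hadoop.hive.metastore.api.LockComponent;
    import org.apache.hadoop.hive.metastore.api.LockLevel;
    import org.apache.hadoop.hive.metastore.api.LockRequest;
    import org.apache.hadoop.hive.metastore.api.LockType;

    public class CatalogAwareLockSketch {
      static LockRequest buildRequest() {
        // Database comes third, catalog fourth, matching the call sites in this PR.
        LockComponent component =
            new LockComponent(LockType.EXCL_WRITE, LockLevel.TABLE, "default", "hive");
        component.setTablename("t1");
        // UPDATE mirrors the operation type used by the tests below.
        component.setOperationType(DataOperationType.UPDATE);
        // user/host identify the requester in heartbeats and SHOW LOCKS output.
        return new LockRequest(Collections.singletonList(component), "hiveuser", "host1");
      }
    }
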
BaseReplicationScenariosAcidTables.java
@@ -74,6 +74,7 @@ public class BaseReplicationScenariosAcidTables {
   static WarehouseInstance primary;
   static WarehouseInstance replica, replicaNonAcid;
   static HiveConf conf;
+  String primaryCatName = "hive";
   String primaryDbName, replicatedDbName;
   List<String> acidTableNames = new LinkedList<>();
   private List<String> nonAcidTableNames = new LinkedList<>();
@@ -348,7 +349,7 @@ List<Long> openTxns(int numTxns, TxnStore txnHandler, HiveConf primaryConf) thro
     return txns;
   }

-  List<Long> allocateWriteIdsForTablesAndAcquireLocks(String primaryDbName, Map<String, Long> tables,
+  List<Long> allocateWriteIdsForTablesAndAcquireLocks(String primaryCatName, String primaryDbName, Map<String, Long> tables,
                                                       TxnStore txnHandler,
                                                       List<Long> txns, HiveConf primaryConf) throws Throwable {
     AllocateTableWriteIdsRequest rqst = new AllocateTableWriteIdsRequest();
@@ -360,7 +361,7 @@ List<Long> allocateWriteIdsForTablesAndAcquireLocks(String primaryDbName, Map<St
       txnHandler.allocateTableWriteIds(rqst);
       for (long txnId : txns) {
         LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE,
-            primaryDbName);
+            primaryDbName, primaryCatName);
         comp.setTablename(entry.getKey());
         comp.setOperationType(DataOperationType.UPDATE);
         List<LockComponent> components = new ArrayList<LockComponent>(1);
@@ -563,7 +563,7 @@ public void testReverseBootstrap() throws Throwable {
     Map<String, Long> tablesInSecDb = new HashMap<>();
     tablesInSecDb.put("t1", (long) numTxnsForSecDb + 4);
     tablesInSecDb.put("t2", (long) numTxnsForSecDb + 4);
-    List<Long> lockIdsForSecDb = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName + "_extra",
+    List<Long> lockIdsForSecDb = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName + "_extra",
         tablesInSecDb, txnHandler, txnsForSecDb, primaryConf);
     tearDownLockIds.addAll(lockIdsForSecDb);

@@ -576,7 +576,7 @@ public void testReverseBootstrap() throws Throwable {
     Map<String, Long> tablesInSourceDb = new HashMap<>();
     tablesInSourceDb.put("t1", (long) numTxnsForPrimaryDb + 6);
     tablesInSourceDb.put("t2", (long) numTxnsForPrimaryDb);
-    List<Long> lockIdsForSourceDb = allocateWriteIdsForTablesAndAcquireLocks(replicatedDbName, tablesInSourceDb, txnHandler,
+    List<Long> lockIdsForSourceDb = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, replicatedDbName, tablesInSourceDb, txnHandler,
         txnsForSourceDb, replica.getConf());
     tearDownLockIds.addAll(lockIdsForSourceDb);

@@ -1092,7 +1092,7 @@ private List<String> setUpFirstIterForOptimisedBootstrap() throws Throwable {
     Map<String, Long> tablesInSecDb = new HashMap<>();
     tablesInSecDb.put("t1", (long) numTxnsForSecDb);
     tablesInSecDb.put("t2", (long) numTxnsForSecDb);
-    List<Long> lockIdsForSecDb = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName + "_extra",
+    List<Long> lockIdsForSecDb = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName + "_extra",
         tablesInSecDb, txnHandler, txnsForSecDb, primaryConf);
     tearDownLockIds.addAll(lockIdsForSecDb);

@@ -1105,7 +1105,7 @@ private List<String> setUpFirstIterForOptimisedBootstrap() throws Throwable {
     Map<String, Long> tablesInSourceDb = new HashMap<>();
     tablesInSourceDb.put("t1", (long) numTxnsForPrimaryDb);
     tablesInSourceDb.put("t5", (long) numTxnsForPrimaryDb);
-    List<Long> lockIdsForSourceDb = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName, tablesInSourceDb, txnHandler,
+    List<Long> lockIdsForSourceDb = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName, tablesInSourceDb, txnHandler,
         txnsForSourceDb, primary.getConf());
     tearDownLockIds.addAll(lockIdsForSourceDb);

@@ -1157,7 +1157,7 @@ private List<String> setUpFirstIterForOptimisedBootstrap() throws Throwable {
     Map<String, Long> newTablesForSecDb = new HashMap<>();
     newTablesForSecDb.put("t1", (long) numTxnsForSecDb + 1);
     newTablesForSecDb.put("t2", (long) numTxnsForSecDb + 1);
-    List<Long> newLockIdsForSecDb = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName + "_extra",
+    List<Long> newLockIdsForSecDb = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName + "_extra",
         newTablesForSecDb, txnHandler, newTxnsForSecDb, primaryConf);
     tearDownLockIds.addAll(newLockIdsForSecDb);

@@ -1169,7 +1169,7 @@ private List<String> setUpFirstIterForOptimisedBootstrap() throws Throwable {
     Map<String, Long> newTablesInSourceDb = new HashMap<>();
     newTablesInSourceDb.put("t1", (long) 5);
     newTablesInSourceDb.put("t5", (long) 3);
-    List<Long> newLockIdsForSourceDb = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName, newTablesInSourceDb, txnHandler,
+    List<Long> newLockIdsForSourceDb = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName, newTablesInSourceDb, txnHandler,
         newTxnsForSourceDb, primary.getConf());
     tearDownLockIds.addAll(newLockIdsForSourceDb);
@@ -524,7 +524,7 @@ public void testCompleteFailoverWithReverseBootstrap() throws Throwable {
     Map<String, Long> tablesInSecDb = new HashMap<>();
     tablesInSecDb.put("t1", (long) numTxnsForSecDb);
     tablesInSecDb.put("t2", (long) numTxnsForSecDb);
-    List<Long> lockIdsForSecDb = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName + "_extra",
+    List<Long> lockIdsForSecDb = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName + "_extra",
         tablesInSecDb, txnHandler, txnsForSecDb, primaryConf);

     //Open 2 txns for Primary Db
@@ -536,7 +536,7 @@ public void testCompleteFailoverWithReverseBootstrap() throws Throwable {
     Map<String, Long> tablesInPrimaryDb = new HashMap<>();
     tablesInPrimaryDb.put("t1", (long) numTxnsForPrimaryDb + 1);
     tablesInPrimaryDb.put("t2", (long) numTxnsForPrimaryDb + 2);
-    List<Long> lockIdsForPrimaryDb = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName,
+    List<Long> lockIdsForPrimaryDb = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName,
         tablesInPrimaryDb, txnHandler, txnsForPrimaryDb, primaryConf);

     //Open 1 txn with no hive locks acquired
@@ -1508,7 +1508,7 @@ public void testAcidTablesBootstrapWithOpenTxnsTimeout() throws Throwable {
     Map<String, Long> tables = new HashMap<>();
     tables.put("t1", numTxns + 1L);
     tables.put("t2", numTxns + 2L);
-    List<Long> lockIds = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName, tables, txnHandler, txns, primaryConf);
+    List<Long> lockIds = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName, tables, txnHandler, txns, primaryConf);

     // Bootstrap dump with open txn timeout as 1s.
     List<String> withConfigs = Arrays.asList(
@@ -1627,7 +1627,7 @@ public void testAcidTablesBootstrapWithOpenTxnsDiffDb() throws Throwable {
     Map<String, Long> tablesInSecDb = new HashMap<>();
     tablesInSecDb.put("t1", (long) numTxns);
     tablesInSecDb.put("t2", (long) numTxns);
-    List<Long> lockIds = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName + "_extra",
+    List<Long> lockIds = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName + "_extra",
         tablesInSecDb, txnHandler, txns, primaryConf);

     // Bootstrap dump with open txn timeout as 300s.
@@ -1723,7 +1723,7 @@ public void testAcidTablesBootstrapWithOpenTxnsWaitingForLock() throws Throwable
     Map<String, Long> tablesInSecDb = new HashMap<>();
     tablesInSecDb.put("t1", (long) numTxns);
     tablesInSecDb.put("t2", (long) numTxns);
-    List<Long> lockIds = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName + "_extra",
+    List<Long> lockIds = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName + "_extra",
         tablesInSecDb, txnHandler, txns, primaryConf);

     WarehouseInstance.Tuple bootstrapDump = primary
@@ -1789,14 +1789,14 @@ public void testAcidTablesBootstrapWithOpenTxnsPrimaryAndSecondaryDb() throws Th
     Map<String, Long> tablesInSecDb = new HashMap<>();
     tablesInSecDb.put("t1", (long) numTxns);
     tablesInSecDb.put("t2", (long) numTxns);
-    List<Long> lockIds = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName + "_extra",
+    List<Long> lockIds = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName + "_extra",
         tablesInSecDb, txnHandler, txns, primaryConf);
     // Allocate write ids for both tables of primary db for all txns
     // t1=5+1L and t2=5+2L inserts
     Map<String, Long> tablesInPrimDb = new HashMap<>();
     tablesInPrimDb.put("t1", (long) numTxns + 1L);
     tablesInPrimDb.put("t2", (long) numTxns + 2L);
-    lockIds.addAll(allocateWriteIdsForTablesAndAcquireLocks(primaryDbName,
+    lockIds.addAll(allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName,
         tablesInPrimDb, txnHandler, txnsSameDb, primaryConf));

     // Bootstrap dump with open txn timeout as 1s.
@@ -1864,7 +1864,7 @@ public void testAcidTablesBootstrapWithOpenTxnsAbortDisabled() throws Throwable
     Map<String, Long> tables = new HashMap<>();
     tables.put("t1", numTxns + 1L);
     tables.put("t2", numTxns + 2L);
-    List<Long> lockIds = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName, tables, txnHandler, txns, primaryConf);
+    List<Long> lockIds = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName, tables, txnHandler, txns, primaryConf);

     // Bootstrap dump with open txn timeout as 1s.
     List<String> withConfigs = Arrays.asList(
@@ -200,7 +200,7 @@ public void testAcidTablesBootstrapDuringIncrementalWithOpenTxnsTimeout() throws
     Map<String, Long> tables = new HashMap<>();
     tables.put("t1", numTxns+2L);
     tables.put("t2", numTxns+6L);
-    List<Long> lockIds = allocateWriteIdsForTablesAndAcquireLocks(primaryDbName, tables, txnHandler, txns, primaryConf);
+    List<Long> lockIds = allocateWriteIdsForTablesAndAcquireLocks(primaryCatName, primaryDbName, tables, txnHandler, txns, primaryConf);

     // Bootstrap dump with open txn timeout as 1s.
     List<String> withConfigs = new LinkedList<>(dumpWithAcidBootstrapClause);
TestInitiator2.java
@@ -31,6 +31,7 @@
 public class TestInitiator2 extends CompactorTest {
   @Test
   public void dbNoAutoCompactSetTrue() throws Exception {
+    String catName = "hive";
     String dbName = "test";
     Map<String, String> dbParams = new HashMap<String, String>(1);
     dbParams.put("no_auto_compaction", "true");
@@ -44,7 +45,7 @@ public void dbNoAutoCompactSetTrue() throws Exception {

     for (int i = 0; i < 11; i++) {
       long txnid = openTxn();
-      LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, dbName);
+      LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, dbName, catName);
       comp.setTablename("dbnacst");
       comp.setOperationType(DataOperationType.UPDATE);
       List<LockComponent> components = new ArrayList<LockComponent>(1);
@@ -64,6 +65,7 @@ public void dbNoAutoCompactSetTrue() throws Exception {
   @Test
   public void dbNoAutoCompactSetFalseUpperCase() throws Exception {
     boolean useCleanerForAbortCleanup = MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.COMPACTOR_CLEAN_ABORTS_USING_CLEANER);
+    String catName = "hive";
     String dbName = "test1";
     Map<String, String> params = new HashMap<String, String>(1);
     params.put("NO_AUTO_COMPACTION", "false");
@@ -77,7 +79,7 @@ public void dbNoAutoCompactSetFalseUpperCase() throws Exception {

     for (int i = 0; i < 11; i++) {
       long txnid = openTxn();
-      LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, dbName);
+      LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, dbName, catName);
       comp.setTablename("dbnacsf");
       comp.setOperationType(DataOperationType.UPDATE);
       List<LockComponent> components = new ArrayList<LockComponent>(1);
@@ -198,7 +198,7 @@ private void assertResult(List<String> expected, List<String> actual) {
   public void testMaterializationLockCleaned() throws Exception {
     TxnStore txnHandler = TxnUtils.getTxnStore(conf);
     OpenTxnsResponse response = txnHandler.openTxns(new OpenTxnRequest(1, "user", "host"));
-    txnHandler.lockMaterializationRebuild("default", TABLE1, response.getTxn_ids().get(0));
+    txnHandler.lockMaterializationRebuild("hive", "default", TABLE1, response.getTxn_ids().get(0));

     //Mimic the lock can be cleaned up
     ValidTxnList validTxnList = Mockito.mock(ValidReadTxnList.class);
4 changes: 2 additions & 2 deletions parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -1265,7 +1265,7 @@ inputFileFormat
 tabTypeExpr
 @init { pushMsg("specifying table types", state); }
 @after { popMsg(state); }
-   : identifier (DOT^ identifier)?
+   : identifier (DOT^ identifier (DOT^ identifier)?)?
   (identifier (DOT^
   (
   (KW_ELEM_TYPE) => KW_ELEM_TYPE
@@ -1346,7 +1346,7 @@ showStatement
     | KW_SHOW KW_TBLPROPERTIES tableName (LPAREN prptyName=StringLiteral RPAREN)? -> ^(TOK_SHOW_TBLPROPERTIES tableName $prptyName?)
     | KW_SHOW KW_LOCKS
       (
-      (KW_DATABASE|KW_SCHEMA) => (KW_DATABASE|KW_SCHEMA) (dbName=identifier) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $dbName $isExtended?)
+      (KW_DATABASE|KW_SCHEMA) => (KW_DATABASE|KW_SCHEMA) (name=databaseName) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $name $isExtended?)
       |
       (parttype=partTypeExpr)? (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWLOCKS $parttype? $isExtended?)
       )
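Taken together, the two grammar edits widen both SHOW LOCKS forms by one optional qualifier. A small sketch of statements the extended rules should now accept — illustrative only; the catalog name and the exact shape of the databaseName rule are assumptions, not verified against the full grammar:

    // Example statements (as Java string literals) exercising the widened rules.
    String[] showLocksExamples = {
        "SHOW LOCKS t1",                // bare table, current catalog and database
        "SHOW LOCKS db1.t1",            // database-qualified table
        "SHOW LOCKS hive.db1.t1",       // catalog-qualified table (new third identifier)
        "SHOW LOCKS DATABASE hive.db1"  // databaseName in place of a bare identifier
    };
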
ShowLocksAnalyzer.java
@@ -18,8 +18,11 @@

 package org.apache.hadoop.hive.ql.ddl.table.lock.show;

+import java.util.Arrays;
+import java.util.List;
 import java.util.Map;

+import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryState;
 import org.apache.hadoop.hive.ql.ddl.DDLSemanticAnalyzerFactory.DDLType;
 import org.apache.hadoop.hive.ql.ddl.DDLUtils;
@@ -30,6 +33,7 @@
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
 import org.apache.hadoop.hive.ql.parse.HiveParser;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.session.SessionState;

 /**
  * Analyzer for show locks commands.
@@ -44,29 +48,56 @@ public ShowLocksAnalyzer(QueryState queryState) throws SemanticException {
   public void analyzeInternal(ASTNode root) throws SemanticException {
     ctx.setResFile(ctx.getLocalTmpPath());

-    String tableName = null;
+    String fullyQualifiedTableName = null;
     Map<String, String> partitionSpec = null;
     boolean isExtended = false;
     if (root.getChildCount() >= 1) {
       // table for which show locks is being executed
       for (int i = 0; i < root.getChildCount(); i++) {
         ASTNode child = (ASTNode) root.getChild(i);
         if (child.getType() == HiveParser.TOK_TABTYPE) {
-          tableName = DDLUtils.getFQName((ASTNode) child.getChild(0));
+          fullyQualifiedTableName = DDLUtils.getFQName((ASTNode) child.getChild(0));
           // get partition metadata if partition specified
           if (child.getChildCount() == 2) {
             ASTNode partitionSpecNode = (ASTNode) child.getChild(1);
-            partitionSpec = getValidatedPartSpec(getTable(tableName), partitionSpecNode, conf, false);
+            partitionSpec = getValidatedPartSpec(getTable(fullyQualifiedTableName), partitionSpecNode, conf, false);
           }
         } else if (child.getType() == HiveParser.KW_EXTENDED) {
           isExtended = true;
         }
       }
     }

+    String catalogName = null;
+    String dbName = null;
+    String tableName = null;
+
+    if (fullyQualifiedTableName != null) {
+      List<String> splitFullyQualifiedTableName = Arrays.stream(fullyQualifiedTableName.split("\\.")).toList();
+      if (splitFullyQualifiedTableName.size() == 1) {
+        catalogName = SessionState.get().getCurrentCatalog();
+        dbName = SessionState.get().getCurrentDatabase();
+        tableName = splitFullyQualifiedTableName.get(0);
+      } else if (splitFullyQualifiedTableName.size() == 2) {
+        catalogName = SessionState.get().getCurrentCatalog();
+        dbName = splitFullyQualifiedTableName.get(0);
+        tableName = splitFullyQualifiedTableName.get(1);
+      } else {
+        catalogName = splitFullyQualifiedTableName.get(0);
+        dbName = splitFullyQualifiedTableName.get(1);
+        tableName = splitFullyQualifiedTableName.get(2);
+      }
      [Review comment on lines +75 to +89 — Copilot AI, Feb 12, 2026]
      ShowLocksAnalyzer parses the fully qualified name by doing a raw split("\\."). This is fragile because Hive identifiers can be quoted/escaped and may legally contain '.' characters, which would be incorrectly treated as catalog/db/table separators. Prefer using the existing table-name parsing utilities (e.g., TableName.fromString(...) / HiveTableName.fromString(...) or parsing from the AST node) instead of splitting the rendered string.
+      if (getCatalog(catalogName) == null) {
+        throw new SemanticException(ErrorMsg.CATALOG_NOT_EXISTS, catalogName);
+      } else if (getDatabase(catalogName, dbName, true) == null) {
+        throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS);
+      }
+    }
+
     assert txnManager != null : "Transaction manager should be set before calling analyze";
     ShowLocksDesc desc =
-        new ShowLocksDesc(ctx.getResFile(), tableName, partitionSpec, isExtended, txnManager.useNewShowLocksFormat());
+        new ShowLocksDesc(ctx.getResFile(), catalogName, dbName, tableName, partitionSpec, isExtended, txnManager.useNewShowLocksFormat());
     Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
     rootTasks.add(task);

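Condensed, the resolution logic added above behaves as follows (a self-contained restatement for reference, using the same raw split; as the review comment points out, quoted identifiers containing '.' would be mis-split):

    // 1 part -> table in current catalog/db; 2 -> db.table; 3 -> catalog.db.table.
    static String[] resolveShowLocksName(String fqName, String currentCatalog, String currentDb) {
      String[] parts = fqName.split("\\.");
      if (parts.length == 1) {
        return new String[] {currentCatalog, currentDb, parts[0]};
      } else if (parts.length == 2) {
        return new String[] {currentCatalog, parts[0], parts[1]};
      }
      return new String[] {parts[0], parts[1], parts[2]};
    }
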
ShowLocksDesc.java
@@ -36,9 +36,9 @@ public class ShowLocksDesc implements DDLDesc, Serializable {
   private static final String OLD_FORMAT_SCHEMA = "name,mode#string:string";
   private static final String OLD_TBL_FORMAT_SCHEMA = "tab_name,mode#string:string";
   private static final String OLD_DB_FORMAT_SCHEMA = "db_name,mode#string:string";
-  private static final String NEW_FORMAT_SCHEMA = "lockid,database,table,partition,lock_state," +
+  private static final String NEW_FORMAT_SCHEMA = "lockid,catalog,database,table,partition,lock_state," +
       "blocked_by,lock_type,transaction_id,last_heartbeat,acquired_at,user,hostname,agent_info#" +
-      "string:string:string:string:string:string:string:string:string:string:string:string:string";
+      "string:string:string:string:string:string:string:string:string:string:string:string:string:string";

   private final String resFile;
   private final String catName;
@@ -58,11 +58,11 @@ public ShowLocksDesc(Path resFile, String catName, String dbName, boolean isExt,
     this.isNewFormat = isNewFormat;
   }

-  public ShowLocksDesc(Path resFile, String tableName, Map<String, String> partSpec, boolean isExt,
+  public ShowLocksDesc(Path resFile, String catName, String dbName, String tableName, Map<String, String> partSpec, boolean isExt,
       boolean isNewFormat) {
     this.resFile = resFile.toString();
-    this.catName = null;
-    this.dbName = null;
+    this.catName = catName;
+    this.dbName = dbName;
     this.tableName = tableName;
     this.partSpec = partSpec;
     this.isExt = isExt;
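Putting the pieces together: the analyzer now feeds all three name parts into the descriptor, and the new-format SHOW LOCKS output gains a catalog column (14 string columns instead of 13). A hedged sketch of the updated table-level constructor in use, with illustrative arguments (the result-file path is hypothetical; Path is org.apache.hadoop.fs.Path):

    // Argument order per this diff: resFile, catalog, database, table,
    // partition spec, EXTENDED flag, new-format flag.
    ShowLocksDesc desc = new ShowLocksDesc(
        new Path("/tmp/showlocks.res"), // hypothetical result file
        "hive",                         // catalog
        "default",                      // database
        "t1",                           // table
        null,                           // no partition spec
        false,                          // not EXTENDED
        true);                          // use the new (ACID) output format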