Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

HDDS-12580. Set up Temporary RocksDB for Storing Container Log Information #8072

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,117 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.containerlog.parser;

import java.io.File;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.hdds.utils.db.LongCodec;
import org.apache.hadoop.hdds.utils.db.StringCodec;

/**
 * Manages the temporary RocksDB store holding container log information.
 *
 * <p>The store has two column families: one keyed by container id holding
 * lists of {@link ContainerInfo}, and one keyed by a datanode string key
 * holding lists of {@link DatanodeContainerInfo} transition records.
 */
public class ContainerDatanodeStore {

  /** Store name; also used as the on-disk directory name under the supplied path. */
  private static final String STORE_NAME = "ContainerDatanodeLogStore.db";

  private static final String CONTAINER_TABLE_NAME = "ContainerLogTable";

  /** Column family: container id -> list of ContainerInfo records. */
  private static final DBColumnFamilyDefinition<Long, List<ContainerInfo>> CONTAINER_LOG_TABLE_COLUMN_FAMILY
      = new DBColumnFamilyDefinition<>(
          CONTAINER_TABLE_NAME,
          LongCodec.get(),
          new GenericInfoCodec<ContainerInfo>()
      );

  private static final String DATANODE_TABLE_NAME = "DatanodeContainerLogTable";

  /** Column family: datanode key -> list of DatanodeContainerInfo transition records. */
  private static final DBColumnFamilyDefinition<String, List<DatanodeContainerInfo>>
      DATANODE_CONTAINER_LOG_TABLE_COLUMN_FAMILY
      = new DBColumnFamilyDefinition<>(
          DATANODE_TABLE_NAME,
          StringCodec.get(),
          new GenericInfoCodec<DatanodeContainerInfo>()
      );

  /** Backing store; non-null only after a successful {@link #initialize(File)}. */
  private DBStore containerDbStore = null;
  private ContainerLogTable<Long, List<ContainerInfo>> containerLogTable = null;
  private DatanodeContainerLogTable<String, List<DatanodeContainerInfo>> datanodeContainerLogTable = null;

  /**
   * Opens (creating if needed) the RocksDB store under {@code dbPath} and
   * wires up both table handles.
   *
   * @param dbPath parent directory for the store
   * @return the opened store, never null
   * @throws IllegalStateException if the store cannot be opened. (Previously
   *     the IOException was swallowed with printStackTrace() and null was
   *     returned, deferring the failure to an NPE on first table access.)
   */
  private DBStore openDb(File dbPath) {
    File dbFile = new File(dbPath, STORE_NAME);
    try {
      ConfigurationSource conf = new OzoneConfiguration();
      DBStoreBuilder dbStoreBuilder = DBStoreBuilder.newBuilder(conf);
      dbStoreBuilder.setName(STORE_NAME);
      dbStoreBuilder.setPath(dbFile.toPath());

      // Register both column families and the codecs for their key/value types.
      dbStoreBuilder.addTable(CONTAINER_LOG_TABLE_COLUMN_FAMILY.getName());
      dbStoreBuilder.addTable(DATANODE_CONTAINER_LOG_TABLE_COLUMN_FAMILY.getName());
      dbStoreBuilder.addCodec(CONTAINER_LOG_TABLE_COLUMN_FAMILY.getKeyType(),
          CONTAINER_LOG_TABLE_COLUMN_FAMILY.getKeyCodec());
      dbStoreBuilder.addCodec(CONTAINER_LOG_TABLE_COLUMN_FAMILY.getValueType(),
          CONTAINER_LOG_TABLE_COLUMN_FAMILY.getValueCodec());
      dbStoreBuilder.addCodec(DATANODE_CONTAINER_LOG_TABLE_COLUMN_FAMILY.getKeyType(),
          DATANODE_CONTAINER_LOG_TABLE_COLUMN_FAMILY.getKeyCodec());
      dbStoreBuilder.addCodec(DATANODE_CONTAINER_LOG_TABLE_COLUMN_FAMILY.getValueType(),
          DATANODE_CONTAINER_LOG_TABLE_COLUMN_FAMILY.getValueCodec());

      DBStore store = dbStoreBuilder.build();

      containerLogTable = new ContainerLogTable<>(store.getTable(CONTAINER_LOG_TABLE_COLUMN_FAMILY.getName(),
          Long.class, new GenericInfoCodec<ContainerInfo>().getTypeClass()));
      datanodeContainerLogTable = new DatanodeContainerLogTable<>(
          store.getTable(DATANODE_CONTAINER_LOG_TABLE_COLUMN_FAMILY.getName(),
              String.class, new GenericInfoCodec<DatanodeContainerInfo>().getTypeClass()));

      return store;
    } catch (IOException e) {
      // Fail fast with context instead of returning null: a null store would
      // only surface later as an unexplained NullPointerException.
      throw new IllegalStateException(
          "Failed to open container log store at " + dbFile.getAbsolutePath(), e);
    }
  }

  /**
   * Opens the store under the given directory. Must be called before any
   * insert method.
   *
   * @param dbFile parent directory for the store
   */
  public void initialize(File dbFile) {
    containerDbStore = openDb(dbFile);
  }

  /**
   * Writes (or overwrites) the list of container records for a container id.
   *
   * @throws IOException on RocksDB write failure
   */
  public void insertContainerData(Long containerId, List<ContainerInfo> containerInfoList) throws IOException {
    containerLogTable.put(containerId, containerInfoList);
  }

  /**
   * Appends transition records for a datanode key, merging with any records
   * already stored under that key.
   *
   * @throws IOException on RocksDB read/write failure
   */
  public void insertContainerDatanodeData(String key, List<DatanodeContainerInfo> transitionList) throws IOException {
    // Single lookup instead of the isExist() + get() double lookup.
    List<DatanodeContainerInfo> existingList = datanodeContainerLogTable.getIfExist(key);
    if (existingList != null) {
      existingList.addAll(transitionList);
      datanodeContainerLogTable.put(key, existingList);
    } else {
      datanodeContainerLogTable.put(key, transitionList);
    }
  }

  /**
   * Closes the underlying store if it was opened. Safe to call when
   * {@link #initialize(File)} was never invoked.
   *
   * @throws IOException on close failure
   */
  public void close() throws IOException {
    if (containerDbStore != null) {
      containerDbStore.close();
    }
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.containerlog.parser;

/**
 * Value object holding the final recorded facts about one container replica
 * as extracted from container logs: its last observed state, the reporting
 * datanode id, and the final BCSID (block commit sequence id).
 */
public class ContainerInfo {

  private String containerFinalState;
  private long datanodeId;
  private long containerFinalBCSID;

  /**
   * @param state   final container state string
   * @param dnodeId id of the datanode this record belongs to
   * @param bcsid   final block commit sequence id
   */
  public ContainerInfo(String state, long dnodeId, long bcsid) {
    containerFinalState = state;
    datanodeId = dnodeId;
    containerFinalBCSID = bcsid;
  }

  public String getContainerFinalState() {
    return containerFinalState;
  }

  public void setContainerFinalState(String state) {
    containerFinalState = state;
  }

  public long getDatanodeId() {
    return datanodeId;
  }

  public void setDatanodeId(long dnId) {
    datanodeId = dnId;
  }

  public long getContainerFinalBCSID() {
    return containerFinalBCSID;
  }

  public void setContainerFinalBCSID(long bcsid) {
    containerFinalBCSID = bcsid;
  }

  /** Compact single-line rendering used for logging/debugging. */
  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("{containerFinalState='").append(containerFinalState).append('\'')
        .append(", datanodeId=").append(datanodeId)
        .append(", containerFinalBCSID=").append(containerFinalBCSID)
        .append('}');
    return sb.toString();
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,154 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.containerlog.parser;

import java.io.File;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;

/**
 * Container log table: a thin delegating decorator over a {@link Table}.
 *
 * <p>Every operation is forwarded unchanged to the wrapped table, except
 * {@link #iterator()}, which wraps the result in a
 * {@code ContainerLogTableIterator}, and {@link #iterator(Object)} (prefix
 * form), which is unsupported.
 *
 * <p>Thread-safety is that of the wrapped table; this class adds no state
 * beyond the delegate reference.
 */

public class ContainerLogTable<KEY, VALUE> implements Table<KEY, VALUE> {
  // The wrapped delegate; all calls forward to it.
  private final Table<KEY, VALUE> table;

  /** @param table the underlying table to delegate to */
  public ContainerLogTable(Table<KEY, VALUE> table) {
    this.table = table;
  }

  @Override
  public void put(KEY key, VALUE value) throws IOException {
    table.put(key, value);
  }

  /** Exposes the underlying delegate table directly. */
  public Table<KEY, VALUE> getTable() {
    return table;
  }

  @Override
  public void putWithBatch(BatchOperation batch, KEY key,
      VALUE value) throws IOException {
    table.putWithBatch(batch, key, value);
  }

  @Override
  public boolean isEmpty() throws IOException {
    return table.isEmpty();
  }

  @Override
  public void delete(KEY key) throws IOException {
    table.delete(key);
  }

  @Override
  public void deleteRange(KEY beginKey, KEY endKey) throws IOException {
    table.deleteRange(beginKey, endKey);
  }

  @Override
  public void deleteWithBatch(BatchOperation batch, KEY key)
      throws IOException {
    table.deleteWithBatch(batch, key);
  }

  // Full-table iteration is supported, but always through the
  // ContainerLogTableIterator wrapper (defined elsewhere in this package).
  @Override
  public final TableIterator<KEY, ? extends Table.KeyValue<KEY, VALUE>> iterator() throws IOException {
    return new ContainerLogTableIterator<>(this);
  }

  // Prefix iteration is intentionally unsupported. NOTE(review): the message
  // is misleading -- the no-arg iterator() above IS supported; only the
  // prefix form is not. Consider rewording the message.
  @Override
  public final TableIterator<KEY, ? extends Table.KeyValue<KEY, VALUE>> iterator(
      KEY prefix) {
    throw new UnsupportedOperationException("Iterating tables directly is not" +
        " supported");
  }

  @Override
  public String getName() throws IOException {
    return table.getName();
  }

  // Estimated count only -- RocksDB does not track an exact key count.
  @Override
  public long getEstimatedKeyCount() throws IOException {
    return table.getEstimatedKeyCount();
  }

  @Override
  public boolean isExist(KEY key) throws IOException {
    return table.isExist(key);
  }

  @Override
  public VALUE get(KEY key) throws IOException {
    return table.get(key);
  }

  @Override
  public VALUE getIfExist(KEY key) throws IOException {
    return table.getIfExist(key);
  }

  @Override
  public VALUE getReadCopy(KEY key) throws IOException {
    return table.getReadCopy(key);
  }

  @Override
  public List<? extends Table.KeyValue<KEY, VALUE>> getRangeKVs(
      KEY startKey, int count, KEY prefix,
      MetadataKeyFilters.MetadataKeyFilter... filters)
      throws IOException, IllegalArgumentException {
    return table.getRangeKVs(startKey, count, prefix, filters);
  }

  @Override
  public List<? extends Table.KeyValue<KEY, VALUE>> getSequentialRangeKVs(
      KEY startKey, int count, KEY prefix,
      MetadataKeyFilters.MetadataKeyFilter... filters)
      throws IOException, IllegalArgumentException {
    return table.getSequentialRangeKVs(startKey, count, prefix, filters);
  }

  @Override
  public void deleteBatchWithPrefix(BatchOperation batch, KEY prefix)
      throws IOException {
    table.deleteBatchWithPrefix(batch, prefix);
  }

  // Bulk dump/load of a key range; delegated unchanged.
  @Override
  public void dumpToFileWithPrefix(File externalFile, KEY prefix)
      throws IOException {
    table.dumpToFileWithPrefix(externalFile, prefix);
  }

  @Override
  public void loadFromFile(File externalFile) throws IOException {
    table.loadFromFile(externalFile);
  }

  // Note: declared as throws Exception (from AutoCloseable), unlike the other
  // methods which throw IOException.
  @Override
  public void close() throws Exception {
    table.close();
  }
}
Loading