databend minio 模式试用

很早就开始关注 databend 了,最近基于 minio 运行试用了一下,发现 databend 已经很强大了,官方目前也推出了 cloud 服务

环境准备

  • docker-compose 文件
version: '3'
services:
  # Single-node mode (profile "single"): one container runs everything.
  databend-single:
    image: datafuselabs/databend:latest
    environment:
      # Default login used by the MySQL/HTTP handlers.
      # (fixed: the original had a trailing space after "dalong")
      - DATABEND_DEFAULT_USER=dalong
      - DATABEND_DEFAULT_PASSWORD=dalong
    profiles:
      - single
    ports:
      # Quoted so YAML never mis-types digit:digit values.
      - "3307:3307"   # MySQL handler
      - "9090:9090"   # admin API
      - "8080:8080"   # query admin API
      - "7070:7070"   # metrics API
      - "8124:8124"   # ClickHouse HTTP handler
      - "8000:8000"   # HTTP handler
  # Cluster mode (profile "cluster"): query node, config mounted from host.
  databend-query:
    image: datafuselabs/databend:latest
    entrypoint: /databend-query --config-file=/etc/databend-query.toml
    profiles:
      - cluster
    volumes:
      - "./databend-query.toml:/etc/databend-query.toml"
    ports:
      - "3307:3307"
      - "9090:9090"
      - "8080:8080"
      - "7070:7070"
      - "8124:8124"
      - "8000:8000"
  # Cluster mode: meta-service node; databend-query reaches it via
  # the endpoint configured in databend-query.toml ([meta].endpoints).
  databend-meta:
    image: datafuselabs/databend:latest
    entrypoint: /databend-meta --config-file=/etc/databend-meta.toml
    profiles:
      - cluster
    volumes:
      - "./databend-meta.toml:/etc/databend-meta.toml"
  # S3-compatible object storage backing the query node's [storage.s3] config.
  minio:
    image: minio/minio
    profiles:
      - cluster
    ports:
      - "9002:9000"    # S3 API (remapped to avoid clashing with host 9000)
      - "19001:19001"  # web console
    environment:
      # NOTE(review): MINIO_ACCESS_KEY/MINIO_SECRET_KEY are deprecated in
      # newer MinIO releases in favor of MINIO_ROOT_USER/MINIO_ROOT_PASSWORD;
      # values must match the [storage.s3] credentials in databend-query.toml.
      MINIO_ACCESS_KEY: minio
      MINIO_SECRET_KEY: minio123
    command: server --console-address :19001 --quiet /data
  • 配置
    元数据的
    databend-meta.toml
 
# databend-meta configuration, mounted into the databend-meta container
# by the compose file above.
# Usage:
# databend-meta -c databend-meta-node-1.toml

log_dir            = "./.databend/logs1"
admin_api_address  = "0.0.0.0:9090"  # HTTP admin/status endpoint
grpc_api_address   = "0.0.0.0:9191"  # gRPC endpoint used by databend-query ([meta].endpoints)

[raft_config]
id            = 1
raft_dir      = "./.databend/meta1"
raft_api_port = 28103

# Assign raft_{listen|advertise}_host in test config.
# This allows you to catch a bug in unit tests when something goes wrong in raft meta nodes communication.
raft_listen_host = "127.0.0.1"
raft_advertise_host = "localhost"

# Start up mode: single node cluster
single        = true

databend-query.toml

# databend-query configuration, mounted into the databend-query container.
# Usage:
# databend-query -c databend_query_config_spec.toml

[query]
max_active_sessions = 256
wait_timeout_mills = 5000

# For flight rpc.
flight_api_address = "0.0.0.0:9091"

# Databend Query http address.
# For admin REST API.
admin_api_address = "0.0.0.0:8080"

# Databend Query metrics REST API.
metric_api_address = "0.0.0.0:7070"

# Databend Query MySQL Handler.
mysql_handler_host = "0.0.0.0"
mysql_handler_port = 3307

# Databend Query ClickHouse Handler.
clickhouse_http_handler_host = "0.0.0.0"
clickhouse_http_handler_port = 8124

# Databend Query HTTP Handler.
http_handler_host = "0.0.0.0"
http_handler_port = 8000

tenant_id = "test_tenant"
cluster_id = "test_cluster"

table_engine_memory_enabled = true
database_engine_github_enabled = true

table_cache_enabled = true
table_memory_cache_mb_size = 1024
table_disk_cache_root = "_cache"
table_disk_cache_mb_size = 10240
table_cache_bloom_index_meta_count=3000
table_cache_bloom_index_data_bytes=1073741824

# [[query.users]]
# name = "admin"
# auth_type = "no_password"

# Built-in default user dalong (password: dalong).
[[query.users]]
name = "dalong"
auth_type = "double_sha1_password"
# echo -n "databend" | sha1sum | cut -d' ' -f1 | xxd -r -p | sha1sum
# NOTE(review): the example command above hashes "databend", but this user
# logs in as dalong/dalong — presumably auth_string is the double-SHA1 of
# "dalong"; verify before reusing the command.
auth_string = "6c9d6ecc326a26af6895f3016deb44365dfa0b0e"

# [[query.users]]
# name = "datafuselabs"
# auth_type = "sha256_password"
# #  echo -n "datafuselabs" | sha256sum
# auth_string = "6db1a2f5da402b43c066fcadcbf78f04260b3236d9035e44dd463f21e29e6f3b"


[log]

[log.file]
level = "WARN"
format = "text"
dir = "./.databend/logs_1"

# Connection to the meta-service; the endpoint matches the databend-meta
# container's grpc_api_address (port 9191) in the compose setup.
[meta]
endpoints = ["databend-meta:9191"]
username = "root"
password = "root"
client_timeout_in_second = 60
auto_sync_interval = 60

# Storage config.
[storage]
# fs | s3 | azblob | obs | oss
type = "s3"

# S3-compatible storage backed by the minio service from the compose file.
# The endpoint uses the in-network hostname/port (minio:9000), not the
# remapped host port 9002; credentials must match the minio environment.
# The "databend" bucket must be created manually before the first insert.
[storage.s3]

bucket = "databend"
endpoint_url = "http://minio:9000"
access_key_id = "minio"
secret_access_key = "minio123"

# To use S3-compatible object storage, uncomment this block and set your values.
# [storage.s3]
# bucket = "<your-bucket-name>"
# endpoint_url = "<your-endpoint>"
# access_key_id = "<your-key-id>"
# secret_access_key = "<your-account-key>"
# enable_virtual_host_style = false

# To use Azure Blob storage, uncomment this block and set your values.
# [storage.azblob]
# endpoint_url = "https://<your-storage-account-name>.blob.core.windows.net"
# container = "<your-azure-storage-container-name>"
# account_name = "<your-storage-account-name>"
# account_key = "<your-account-key>"

# To use OBS object storage, uncomment this block and set your values.
# [storage.obs]
# bucket = "<your-bucket-name>"
# endpoint_url = "<your-endpoint>"
# access_key_id = "<your-key-id>"
# secret_access_key = "<your-account-key>"

# To use OSS object storage, uncomment this block and set your values.
# [storage.oss]
# bucket = "<your-bucket-name>"
# endpoint_url = "<your-endpoint>"
# access_key_id = "<your-key-id>"
# access_key_secret = "<your-account-key>"

启动&试用

  • 启动
docker-compose --profile cluster up -d

注意启动之后需要创建对应的s3 bucket,否则insert 会有错误,create ddl 不影响

  • 使用
mysql -udalong -h127.0.0.1 -P3307  -p
create database demoapp;
use demoapp;
create table t1(a int);
insert into t1(a) values(1),(2);
select * from t1;

s3 存储效果

 

 

说明

databend 对 s3 存储的使用基于自己开发的 opendal(通用存储抽象层),greptimedb 也使用了此框架。目前基于 rust 的 db 引擎越来越多了,而且大家基本都会基于
apache arrow 来构建,已经是一个趋势了

参考资料

https://github.com/datafuselabs/databend/
https://databend.rs/
https://github.com/datafuselabs/opendal

posted on 2022-12-27 20:34  荣锋亮  阅读(112)  评论(0编辑  收藏  举报

导航