Skip to content

Commit

Permalink
debug thread-safe zerolog usage, rs/zerolog#555, try to SyncWriter ->…
Browse files Browse the repository at this point in the history
… ConsoleWriter, increase some ClickHouse timeouts in the test configs, because clickhouse-go/v2 hit some timeouts during execution
  • Loading branch information
Slach committed Jun 14, 2023
1 parent 907377c commit 733e689
Show file tree
Hide file tree
Showing 11 changed files with 158 additions and 160 deletions.
5 changes: 3 additions & 2 deletions cmd/clickhouse-backup/main.go
Expand Up @@ -24,11 +24,12 @@ var (
func main() {
zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs
zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
//consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"}
consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"}
//diodeWriter := diode.NewWriter(consoleWriter, 4096, 10*time.Millisecond, func(missed int) {
// fmt.Printf("Logger Dropped %d messages", missed)
//})
log.Logger = zerolog.New(zerolog.SyncWriter(os.Stdout)).With().Timestamp().Caller().Logger()
log.Logger = zerolog.New(zerolog.SyncWriter(consoleWriter)).With().Timestamp().Caller().Logger()
//zerolog.SetGlobalLevel(zerolog.Disabled)
//log.Logger = zerolog.New(os.Stdout).With().Timestamp().Caller().Logger()
stdlog.SetOutput(log.Logger)
cliapp := cli.NewApp()
Expand Down
6 changes: 2 additions & 4 deletions pkg/clickhouse/clickhouse.go
Expand Up @@ -909,12 +909,10 @@ func (ch *ClickHouse) SelectSingleRowNoCtx(dest interface{}, query string, args

func (ch *ClickHouse) LogQuery(query string, args ...interface{}) string {
var logF *zerolog.Event
// zerolog is not thread-safe https://github.com/rs/zerolog/issues/242 ;(
logger := ch.Logger.With().Logger()
if !ch.Config.LogSQLQueries {
logF = logger.Debug()
logF = ch.Logger.Debug()
} else {
logF = logger.Info()
logF = ch.Logger.Info()
}
if len(args) > 0 {
logF.Msg(strings.NewReplacer("\n", " ", "\r", " ", "\t", " ").Replace(fmt.Sprintf("%s with args %v", query, args)))
Expand Down
1 change: 0 additions & 1 deletion pkg/log_helper/log_level.go
Expand Up @@ -19,5 +19,4 @@ func SetLogLevelFromString(logLevel string) {
level = zerolog.InfoLevel
}
zerolog.SetGlobalLevel(level)
zerolog.SetGlobalLevel(zerolog.Disabled)
}
5 changes: 2 additions & 3 deletions pkg/resumable/state.go
Expand Up @@ -105,13 +105,12 @@ func (s *State) IsAlreadyProcessed(path string) (bool, int64) {
res := strings.Index(s.currentState, path+":")
if res >= 0 {
// s.logger is non thread-safe https://github.com/rs/zerolog/issues/242
logger := s.logger.With().Logger()
logger.Info().Msgf("%s already processed", path)
s.logger.Info().Msgf("%s already processed", path)
sSize := s.currentState[res : res+strings.Index(s.currentState[res:], "\n")]
sSize = sSize[strings.Index(sSize, ":")+1:]
size, err = strconv.ParseInt(sSize, 10, 64)
if err != nil {
logger.Warn().Msgf("invalid size %s in upload state: %v", sSize, err)
s.logger.Warn().Msgf("invalid size %s in upload state: %v", sSize, err)
}
}
s.mx.RUnlock()
Expand Down
54 changes: 27 additions & 27 deletions test/integration/config-custom-kopia.yml
@@ -1,27 +1,27 @@
general:
disable_progress_bar: true
remote_storage: custom
upload_concurrency: 4
download_concurrency: 4
skip_tables:
- " system.*"
- "INFORMATION_SCHEMA.*"
- "information_schema.*"
restore_schema_on_cluster: "{cluster}"
use_resumable_state: false
clickhouse:
host: 127.0.0.1
port: 9440
username: backup
password: meow=& 123?*%# МЯУ
secure: true
skip_verify: true
sync_replicated_tables: true
timeout: 1s
restart_command: bash -c 'echo "FAKE RESTART"'
custom:
# all `kopia` uploads are incremental
upload_command: /custom/kopia/upload.sh {{ .backupName }}
download_command: /custom/kopia/download.sh {{ .backupName }}
delete_command: /custom/kopia/delete.sh {{ .backupName }}
list_command: /custom/kopia/list.sh
general:
disable_progress_bar: true
remote_storage: custom
upload_concurrency: 4
download_concurrency: 4
skip_tables:
- " system.*"
- "INFORMATION_SCHEMA.*"
- "information_schema.*"
restore_schema_on_cluster: "{cluster}"
use_resumable_state: false
clickhouse:
host: 127.0.0.1
port: 9440
username: backup
password: meow=& 123?*%# МЯУ
secure: true
skip_verify: true
sync_replicated_tables: true
timeout: 2s
restart_command: bash -c 'echo "FAKE RESTART"'
custom:
# all `kopia` uploads are incremental
upload_command: /custom/kopia/upload.sh {{ .backupName }}
download_command: /custom/kopia/download.sh {{ .backupName }}
delete_command: /custom/kopia/delete.sh {{ .backupName }}
list_command: /custom/kopia/list.sh
52 changes: 26 additions & 26 deletions test/integration/config-custom-restic.yml
@@ -1,26 +1,26 @@
general:
disable_progress_bar: true
remote_storage: custom
upload_concurrency: 4
download_concurrency: 4
skip_tables:
- " system.*"
- "INFORMATION_SCHEMA.*"
- "information_schema.*"
restore_schema_on_cluster: "{cluster}"
use_resumable_state: false
clickhouse:
host: 127.0.0.1
port: 9440
username: backup
password: meow=& 123?*%# МЯУ
secure: true
skip_verify: true
sync_replicated_tables: true
timeout: 1s
restart_command: bash -c 'echo "FAKE RESTART"'
custom:
upload_command: /custom/restic/upload.sh {{ .backupName }} {{ .diffFromRemote }}
download_command: /custom/restic/download.sh {{ .backupName }}
delete_command: /custom/restic/delete.sh {{ .backupName }}
list_command: /custom/restic/list.sh
general:
disable_progress_bar: true
remote_storage: custom
upload_concurrency: 4
download_concurrency: 4
skip_tables:
- " system.*"
- "INFORMATION_SCHEMA.*"
- "information_schema.*"
restore_schema_on_cluster: "{cluster}"
use_resumable_state: false
clickhouse:
host: 127.0.0.1
port: 9440
username: backup
password: meow=& 123?*%# МЯУ
secure: true
skip_verify: true
sync_replicated_tables: true
timeout: 2s
restart_command: bash -c 'echo "FAKE RESTART"'
custom:
upload_command: /custom/restic/upload.sh {{ .backupName }} {{ .diffFromRemote }}
download_command: /custom/restic/download.sh {{ .backupName }}
delete_command: /custom/restic/delete.sh {{ .backupName }}
list_command: /custom/restic/list.sh
52 changes: 26 additions & 26 deletions test/integration/config-custom-rsync.yml
@@ -1,26 +1,26 @@
general:
disable_progress_bar: true
remote_storage: custom
upload_concurrency: 4
download_concurrency: 4
skip_tables:
- " system.*"
- "INFORMATION_SCHEMA.*"
- "information_schema.*"
restore_schema_on_cluster: "{cluster}"
use_resumable_state: false
clickhouse:
host: 127.0.0.1
port: 9440
username: backup
password: meow=& 123?*%# МЯУ
secure: true
skip_verify: true
sync_replicated_tables: true
timeout: 1s
restart_command: bash -c 'echo "FAKE RESTART"'
custom:
upload_command: /custom/rsync/upload.sh {{ .backupName }} {{ .diffFromRemote }}
download_command: /custom/rsync/download.sh {{ .backupName }}
delete_command: /custom/rsync/delete.sh {{ .backupName }}
list_command: /custom/rsync/list.sh
general:
disable_progress_bar: true
remote_storage: custom
upload_concurrency: 4
download_concurrency: 4
skip_tables:
- " system.*"
- "INFORMATION_SCHEMA.*"
- "information_schema.*"
restore_schema_on_cluster: "{cluster}"
use_resumable_state: false
clickhouse:
host: 127.0.0.1
port: 9440
username: backup
password: meow=& 123?*%# МЯУ
secure: true
skip_verify: true
sync_replicated_tables: true
timeout: 2s
restart_command: bash -c 'echo "FAKE RESTART"'
custom:
upload_command: /custom/rsync/upload.sh {{ .backupName }} {{ .diffFromRemote }}
download_command: /custom/rsync/download.sh {{ .backupName }}
delete_command: /custom/rsync/delete.sh {{ .backupName }}
list_command: /custom/rsync/list.sh
64 changes: 32 additions & 32 deletions test/integration/config-database-mapping.yml
@@ -1,32 +1,32 @@
general:
disable_progress_bar: true
remote_storage: s3
upload_concurrency: 4
download_concurrency: 4
restore_schema_on_cluster: "{cluster}"
restore_database_mapping:
database1: default
clickhouse:
host: 127.0.0.1
port: 9440
username: backup
password: meow=& 123?*%# МЯУ
secure: true
skip_verify: true
sync_replicated_tables: true
timeout: 1s
restart_command: bash -c 'echo "FAKE RESTART"'
s3:
access_key: access-key
secret_key: it-is-my-super-secret-key
bucket: clickhouse
endpoint: http://minio:9000
acl: private
force_path_style: true
path: backup/{cluster}/{shard}
disable_ssl: true
compression_format: tar
api:
listen: :7171
create_integration_tables: true
allow_parallel: true
general:
disable_progress_bar: true
remote_storage: s3
upload_concurrency: 4
download_concurrency: 4
restore_schema_on_cluster: "{cluster}"
restore_database_mapping:
database1: default
clickhouse:
host: 127.0.0.1
port: 9440
username: backup
password: meow=& 123?*%# МЯУ
secure: true
skip_verify: true
sync_replicated_tables: true
timeout: 2s
restart_command: bash -c 'echo "FAKE RESTART"'
s3:
access_key: access-key
secret_key: it-is-my-super-secret-key
bucket: clickhouse
endpoint: http://minio:9000
acl: private
force_path_style: true
path: backup/{cluster}/{shard}
disable_ssl: true
compression_format: tar
api:
listen: :7171
create_integration_tables: true
allow_parallel: true
70 changes: 35 additions & 35 deletions test/integration/config-s3-nodelete.yml
@@ -1,35 +1,35 @@
general:
disable_progress_bar: true
remote_storage: s3
upload_concurrency: 4
download_concurrency: 4
skip_tables:
- " system.*"
- "INFORMATION_SCHEMA.*"
- "information_schema.*"
- "_temporary_and_external_tables.*"
restore_schema_on_cluster: "{cluster}"
clickhouse:
host: 127.0.0.1
port: 9440
username: backup
password: meow=& 123?*%# МЯУ
secure: true
skip_verify: true
sync_replicated_tables: true
timeout: 1s
restart_command: bash -c 'echo "FAKE RESTART"'
s3:
access_key: nodelete
secret_key: nodelete_password
bucket: clickhouse
endpoint: http://minio:9000
acl: private
force_path_style: true
path: backup
disable_ssl: true
compression_format: tar
api:
listen: :7171
create_integration_tables: true
allow_parallel: true
general:
disable_progress_bar: true
remote_storage: s3
upload_concurrency: 4
download_concurrency: 4
skip_tables:
- " system.*"
- "INFORMATION_SCHEMA.*"
- "information_schema.*"
- "_temporary_and_external_tables.*"
restore_schema_on_cluster: "{cluster}"
clickhouse:
host: 127.0.0.1
port: 9440
username: backup
password: meow=& 123?*%# МЯУ
secure: true
skip_verify: true
sync_replicated_tables: true
timeout: 2s
restart_command: bash -c 'echo "FAKE RESTART"'
s3:
access_key: nodelete
secret_key: nodelete_password
bucket: clickhouse
endpoint: http://minio:9000
acl: private
force_path_style: true
path: backup
disable_ssl: true
compression_format: tar
api:
listen: :7171
create_integration_tables: true
allow_parallel: true
2 changes: 1 addition & 1 deletion test/integration/config-s3.yml
Expand Up @@ -17,7 +17,7 @@ clickhouse:
secure: true
skip_verify: true
sync_replicated_tables: true
timeout: 1s
timeout: 2s
restart_command: bash -c 'echo "FAKE RESTART"'
backup_mutations: true
s3:
Expand Down
7 changes: 4 additions & 3 deletions test/integration/integration_test.go
Expand Up @@ -396,9 +396,10 @@ var incrementData = []TestDataStruct{

func init() {
zerolog.TimeFieldFormat = zerolog.TimeFormatUnixMs
log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"})
stdlog.SetOutput(log.Logger)
zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: true, TimeFormat: "2006-01-02 15:04:05.000"}
log.Logger = zerolog.New(zerolog.SyncWriter(consoleWriter)).With().Timestamp().Logger()
stdlog.SetOutput(log.Logger)
logLevel := "info"
if os.Getenv("LOG_LEVEL") != "" {
logLevel = os.Getenv("LOG_LEVEL")
Expand Down Expand Up @@ -1289,7 +1290,7 @@ func TestS3NoDeletePermission(t *testing.T) {
r.NoError(dockerCP("config-s3-nodelete.yml", "clickhouse:/etc/clickhouse-backup/config.yml"))

ch := &TestClickHouse{}
ch.connectWithWait(r, 500*time.Millisecond, 1*time.Second)
ch.connectWithWait(r, 500*time.Millisecond, 5*time.Second)
defer ch.chbackend.Close()
generateTestData(ch, r)
r.NoError(dockerExec("clickhouse", "clickhouse-backup", "create_remote", "no_delete_backup"))
Expand Down

0 comments on commit 733e689

Please sign in to comment.