-
Notifications
You must be signed in to change notification settings - Fork 1.9k
/
history.cpp
1589 lines (1351 loc) · 60.8 KB
/
history.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// History functions, part of the user interface.
#include "config.h" // IWYU pragma: keep
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <cstring>
// We need the sys/file.h for the flock() declaration on Linux but not OS X.
#include <sys/file.h> // IWYU pragma: keep
#include <sys/stat.h>
#include <unistd.h>
#include <algorithm>
#include <chrono>
#include <cwchar>
#include <functional>
#include <iterator>
#include <map>
#include <random>
#include <unordered_set>
#include "ast.h"
#include "common.h"
#include "env.h"
#include "expand.h"
#include "fallback.h" // IWYU pragma: keep
#include "fds.h"
#include "flog.h"
#include "global_safety.h"
#include "history.h"
#include "history_file.h"
#include "io.h"
#include "iothread.h"
#include "lru.h"
#include "operation_context.h"
#include "parse_constants.h"
#include "parse_util.h"
#include "path.h"
#include "wcstringutil.h"
#include "wildcard.h" // IWYU pragma: keep
#include "wutil.h" // IWYU pragma: keep
// Our history format is intended to be valid YAML. Here it is:
//
// - cmd: ssh blah blah blah
// when: 2348237
// paths:
// - /path/to/something
// - /path/to/something_else
//
// Newlines are replaced by \n. Backslashes are replaced by \\.
// This is the history session ID we use by default if the user has not set env var fish_history.
#define DFLT_FISH_HISTORY_SESSION_ID L"fish"
// When we rewrite the history, the number of items we keep.
#define HISTORY_SAVE_MAX (1024 * 256)
// Default buffer size for flushing to the history file.
#define HISTORY_OUTPUT_BUFFER_SIZE (64 * 1024)
// The file access mode we use for creating history files
static constexpr int history_file_mode = 0600;
// How many times we retry to save
// Saving may fail if the file is modified in between our opening
// the file and taking the lock
static constexpr int max_save_tries = 1024;
namespace {
/// Flush the contents of \p buffer to \p fd and clear it, but only once the buffer holds at
/// least \p min_size bytes (pass 0 to force a flush). On a failed write the buffer is left
/// intact. \return 0 on success, an errno value on failure.
int flush_to_fd(std::string *buffer, int fd, size_t min_size) {
    const size_t len = buffer->size();
    // Nothing to do if empty or still below the flush threshold.
    if (len == 0 || len < min_size) {
        return 0;
    }
    if (write_loop(fd, buffer->data(), len) < 0) {
        return errno;
    }
    buffer->clear();
    return 0;
}
/// RAII timer: records the current time on construction and, on destruction, logs how long
/// it was alive (in ms) to the profile_history category.
class time_profiler_t {
    const char *what;
    double start;

   public:
    explicit time_profiler_t(const char *w) : what(w), start(timef()) {}

    ~time_profiler_t() {
        const double elapsed = timef() - start;
        FLOGF(profile_history, "%s: %.0f ms", what, elapsed * 1000);
    }
};
/// \return the path for the history file for the given \p session_id, or none() if it could not be
/// loaded. If suffix is provided, append that suffix to the path; this is used for temporary files.
maybe_t<wcstring> history_filename(const wcstring &session_id, const wcstring &suffix = {}) {
    // An empty session ID means history is effectively disabled.
    if (session_id.empty()) return none();
    // History files live in the user's data directory.
    wcstring path;
    if (!path_get_data(path)) return none();
    path += L"/";
    path += session_id;
    path += L"_history";
    path += suffix;
    return path;
}
} // anonymous namespace
/// An LRU cache of history items keyed by their command text. Used when rewriting the history
/// file so that only the most recently used distinct commands are kept.
class history_lru_cache_t : public lru_cache_t<history_item_t> {
   public:
    explicit history_lru_cache_t(size_t max) : lru_cache_t<history_item_t>(max) {}

    /// Function to add a history item.
    void add_item(history_item_t item) {
        // Skip empty items.
        if (item.empty()) return;

        // See if it's in the cache. If it is, update the timestamp. If not, we create a new node
        // and add it. Note that calling get_node promotes the node to the front.
        wcstring key = item.str();
        history_item_t *node = this->get(key);
        if (node == nullptr) {
            this->insert(std::move(key), std::move(item));
        } else {
            // Keep whichever timestamp is newer.
            node->creation_timestamp = std::max(node->timestamp(), item.timestamp());
            // What to do about paths here? Let's just ignore them.
        }
    }
};
/// We can merge two items if they are the same command. We use the more recent timestamp, more
/// recent identifier, and the longer list of required paths.
bool history_item_t::merge(const history_item_t &item) {
    // Merging requires identical text and identical persistence mode.
    const bool mergeable =
        (this->contents == item.contents) && (this->persist_mode == item.persist_mode);
    if (!mergeable) return false;

    // Keep the newer creation timestamp.
    this->creation_timestamp = std::max(this->creation_timestamp, item.creation_timestamp);
    // Keep the longer list of required paths.
    if (item.required_paths.size() > this->required_paths.size()) {
        this->required_paths = item.required_paths;
    }
    // Keep the more recent identifier.
    this->identifier = std::max(this->identifier, item.identifier);
    return true;
}
/// Construct a history item with the given command text \p str, creation time \p when,
/// unique identifier \p ident, and persistence mode \p persist_mode.
history_item_t::history_item_t(wcstring str, time_t when, history_identifier_t ident,
                               history_persistence_mode_t persist_mode)
    : contents(std::move(str)),
      creation_timestamp(when),
      identifier(ident),
      persist_mode(persist_mode) {}
/// \return whether this item's text matches the search \p term under search type \p type.
/// For case-insensitive searches, \p term must already be lowercased; the item's own contents
/// are lowercased here before comparison.
bool history_item_t::matches_search(const wcstring &term, enum history_search_type_t type,
                                    bool case_sensitive) const {
    // Note that 'term' has already been lowercased when constructing the
    // search object if we're doing a case insensitive search.
    wcstring contents_lower;
    if (!case_sensitive) {
        contents_lower = wcstolower(contents);
    }
    const wcstring &content_to_match = case_sensitive ? contents : contents_lower;

    switch (type) {
        case history_search_type_t::exact: {
            return term == content_to_match;
        }
        case history_search_type_t::contains: {
            return content_to_match.find(term) != wcstring::npos;
        }
        case history_search_type_t::prefix: {
            return string_prefixes_string(term, content_to_match);
        }
        case history_search_type_t::contains_glob: {
            // Anchor the pattern with ANY_STRING on both sides so it may match anywhere.
            // Guard against an empty unescaped pattern: calling front()/back() on an empty
            // string is undefined behavior.
            wcstring wcpattern1 = parse_util_unescape_wildcards(term);
            if (wcpattern1.empty() || wcpattern1.front() != ANY_STRING)
                wcpattern1.insert(0, 1, ANY_STRING);
            if (wcpattern1.back() != ANY_STRING) wcpattern1.push_back(ANY_STRING);
            return wildcard_match(content_to_match, wcpattern1);
        }
        case history_search_type_t::prefix_glob: {
            // Anchor only the end, so the pattern must match a prefix.
            // Guard against an empty unescaped pattern: back() on an empty string is undefined.
            wcstring wcpattern2 = parse_util_unescape_wildcards(term);
            if (wcpattern2.empty() || wcpattern2.back() != ANY_STRING)
                wcpattern2.push_back(ANY_STRING);
            return wildcard_match(content_to_match, wcpattern2);
        }
        case history_search_type_t::contains_subsequence: {
            return subsequence_in_string(term, content_to_match);
        }
        case history_search_type_t::match_everything: {
            return true;
        }
    }
    DIE("unexpected history_search_type_t value");
}
/// The implementation of a history list: in-memory "new" items plus an indexed view of the
/// on-disk history file contents.
struct history_impl_t {
    // Add a new history item to the end. If pending is set, the item will not be returned by
    // item_at_index until a call to resolve_pending(). Pending items are tracked with an offset
    // into the array of new items, so adding a non-pending item has the effect of resolving all
    // pending items.
    void add(history_item_t &&item, bool pending = false, bool do_save = true);

    // Internal function.
    void clear_file_state();

    // The name of this list. Used for picking a suitable filename and for switching modes.
    const wcstring name;

    // New items. Note that these are NOT discarded on save. We need to keep these around so we can
    // distinguish between items in our history and items in the history of other shells that were
    // started after we were started.
    history_item_list_t new_items;

    // The index of the first new item that we have not yet written.
    size_t first_unwritten_new_item_index{0};

    // Whether we have a pending item. If so, the most recently added item is ignored by
    // item_at_index.
    bool has_pending_item{false};

    // Whether we should disable saving to the file for a time.
    uint32_t disable_automatic_save_counter{0};

    // Deleted item contents.
    // Boolean describes if it should be deleted only in this session or in all
    // (used in deduplication).
    std::unordered_map<wcstring, bool> deleted_items{};

    // The buffer containing the history file contents.
    std::unique_ptr<history_file_contents_t> file_contents{};

    // The file ID of the history file.
    file_id_t history_file_id = kInvalidFileID;

    // The boundary timestamp distinguishes old items from new items. Items whose timestamps are <=
    // the boundary are considered "old". Items whose timestamps are > the boundary are new, and are
    // ignored by this instance (unless they came from this instance). The timestamp may be adjusted
    // by incorporate_external_changes().
    time_t boundary_timestamp{};

    /// The most recent "unique" identifier for a history item.
    history_identifier_t last_identifier{0};

    // How many items we add until the next vacuum. Initially a random value.
    int countdown_to_vacuum{-1};

    // Whether we've loaded old items.
    bool loaded_old{false};

    // List of old items, as offsets into our mmap data.
    std::deque<size_t> old_item_offsets{};

    // If set, we gave up on file locking because it took too long.
    // Note this is shared among all history instances.
    static relaxed_atomic_bool_t abandoned_locking;

    /// \return a timestamp for new items - see the implementation for a subtlety.
    time_t timestamp_now() const;

    /// \return a new item identifier, incrementing our counter.
    history_identifier_t next_identifier() { return ++last_identifier; }

    // Figure out the offsets of our file contents.
    void populate_from_file_contents();

    // Loads old items if necessary.
    void load_old_if_needed();

    // Deletes duplicates in new_items.
    void compact_new_items();

    // Removes trailing ephemeral items.
    // Ephemeral items have leading spaces, and can only be retrieved immediately; adding any item
    // removes them.
    void remove_ephemeral_items();

    // Attempts to rewrite the existing file to a target temporary file
    // Returns false on error, true on success
    bool rewrite_to_temporary_file(int existing_fd, int dst_fd) const;

    // Saves history by rewriting the file.
    bool save_internal_via_rewrite();

    // Saves history by appending to the file.
    bool save_internal_via_appending();

    // Saves history.
    void save(bool vacuum = false);

    // Saves history unless doing so is disabled.
    void save_unless_disabled();

    explicit history_impl_t(wcstring name)
        : name(std::move(name)), boundary_timestamp(time(nullptr)) {}

    history_impl_t(history_impl_t &&) = default;
    ~history_impl_t() = default;

    /// Returns whether this is using the default name.
    bool is_default() const;

    // Determines whether the history is empty. Unfortunately this cannot be const, since it may
    // require populating the history.
    bool is_empty();

    // Remove a history item.
    void remove(const wcstring &str);

    // Resolves any pending history items, so that they may be returned in history searches.
    void resolve_pending();

    // Enable / disable automatic saving. Main thread only!
    void disable_automatic_saving();
    void enable_automatic_saving();

    // Irreversibly clears history.
    void clear();

    // Clears only session.
    void clear_session();

    // Populates from older location (in config path, rather than data path).
    void populate_from_config_path();

    // Populates from a bash history file.
    void populate_from_bash(FILE *stream);

    // Incorporates the history of other shells into this history.
    void incorporate_external_changes();

    // Gets all the history into a list. This is intended for the $history environment variable.
    // This may be long!
    void get_history(std::vector<wcstring> &result);

    // Let indexes be a list of one-based indexes into the history, matching the interpretation of
    // $history. That is, $history[1] is the most recently executed command. Values less than one
    // are skipped. Return a mapping from index to history item text.
    std::unordered_map<long, wcstring> items_at_indexes(const std::vector<long> &idxs);

    // Sets the valid file paths for the history item with the given identifier.
    void set_valid_file_paths(std::vector<wcstring> &&valid_file_paths, history_identifier_t ident);

    // Return the specified history at the specified index. 0 is the index of the current
    // commandline. (So the most recent item is at index 1.)
    history_item_t item_at_index(size_t idx);

    // Return the number of history entries.
    size_t size();

    // Maybe lock a history file.
    // \return true if successful, false if locking was skipped.
    static bool maybe_lock_file(int fd, int lock_type);
    static void unlock_file(int fd);
};
// Whether locking was abandoned because it took too long; shared across all history instances.
relaxed_atomic_bool_t history_impl_t::abandoned_locking{false};
// static
/// Attempt to take a lock of type \p lock_type on \p fd, skipping entirely when locking has
/// previously been abandoned, chaos mode is on, or the history lives on a remote filesystem.
/// \return true if the lock was taken, false otherwise.
bool history_impl_t::maybe_lock_file(int fd, int lock_type) {
    assert(!(lock_type & LOCK_UN) && "Do not use lock_file to unlock");

    // Don't lock if it took too long before, if we are simulating a failing lock, or if our history
    // is on a remote filesystem.
    const bool skip_locking = abandoned_locking || history_t::chaos_mode ||
                              path_get_data_remoteness() == dir_remoteness_t::remote;
    if (skip_locking) return false;

    const double lock_start = timef();
    const int ret = flock(fd, lock_type);
    const double duration = timef() - lock_start;
    if (duration > 0.25) {
        FLOGF(warning, _(L"Locking the history file took too long (%.3f seconds)."), duration);
        // Give up on locking from now on, in every instance.
        abandoned_locking = true;
    }
    return ret != -1;
}
// static
/// Release any flock() lock held on \p fd.
void history_impl_t::unlock_file(int fd) { flock(fd, LOCK_UN); }
/// Append \p item to our new items, merging with the last item when possible.
/// \p pending marks the item as hidden from item_at_index until resolved;
/// \p do_save controls whether we may trigger an automatic save.
void history_impl_t::add(history_item_t &&item, bool pending, bool do_save) {
    assert(item.timestamp() != 0 && "Should not add an item with a 0 timestamp");

    // We use empty items as sentinels to indicate the end of history.
    // Do not allow them to be added (#6032).
    if (item.contents.empty()) return;

    // Try merging with the last item.
    const bool merged = !new_items.empty() && new_items.back().merge(item);
    if (merged) {
        // We merged, so we don't have to add anything. Maybe this item was pending, but it just got
        // merged with an item that is not pending, so pending just becomes false.
        this->has_pending_item = false;
        return;
    }

    // We have to add a new item.
    new_items.push_back(item);
    this->has_pending_item = pending;
    if (do_save) save_unless_disabled();
}
/// Save the history, unless automatic saving is currently disabled.
/// Roughly every kVacuumFrequency saves we also vacuum (rewrite and compact) the file.
void history_impl_t::save_unless_disabled() {
    // Respect disable_automatic_save_counter.
    if (disable_automatic_save_counter > 0) return;

    // We may or may not vacuum. We try to vacuum every kVacuumFrequency items, but start the
    // countdown at a random number so that even if the user never runs more than 25 commands, we'll
    // eventually vacuum. If countdown_to_vacuum is -1, it means we haven't yet picked a value for
    // the counter.
    const int kVacuumFrequency = 25;
    if (countdown_to_vacuum < 0) {
        // Generate a number in the range [0, kVacuumFrequency).
        unsigned seed =
            static_cast<unsigned>(std::chrono::system_clock::now().time_since_epoch().count());
        std::minstd_rand gen{seed};
        std::uniform_int_distribution<unsigned> dist{0, kVacuumFrequency - 1};
        countdown_to_vacuum = dist(gen);
    }

    // Determine if we're going to vacuum, and reset the countdown if so.
    const bool vacuum = (countdown_to_vacuum == 0);
    if (vacuum) countdown_to_vacuum = kVacuumFrequency;

    // This might be a good candidate for moving to a background thread.
    time_profiler_t profiler(vacuum ? "save vacuum"       //!OCLINT(unused var)
                                    : "save no vacuum");  //!OCLINT(side-effect)
    this->save(vacuum);

    // Update our countdown.
    assert(countdown_to_vacuum > 0);
    countdown_to_vacuum--;
}
// Remove matching history entries from our list of new items. This only supports literal,
// case-sensitive, matches.
void history_impl_t::remove(const wcstring &str_to_remove) {
    // Add to our list of deleted items.
    deleted_items.insert(std::pair<wcstring, bool>(str_to_remove, false));

    // Walk backwards so erasing never disturbs an index we have yet to visit.
    for (size_t idx = new_items.size(); idx-- > 0;) {
        if (new_items.at(idx).str() != str_to_remove) continue;
        new_items.erase(new_items.begin() + idx);
        // If this index is before our first_unwritten_new_item_index, then subtract one from
        // that index so it stays pointing at the same item. If it is equal to or larger, then
        // we have not yet written this item, so we don't have to adjust the index.
        if (idx < first_unwritten_new_item_index) {
            first_unwritten_new_item_index--;
        }
    }
    assert(first_unwritten_new_item_index <= new_items.size());
}
void history_impl_t::set_valid_file_paths(std::vector<wcstring> &&valid_file_paths,
history_identifier_t ident) {
// 0 identifier is used to mean "not necessary".
if (ident == 0) {
return;
}
// Look for an item with the given identifier. It is likely to be at the end of new_items.
for (auto iter = new_items.rbegin(); iter != new_items.rend(); ++iter) {
if (iter->identifier == ident) { // found it
iter->required_paths = std::move(valid_file_paths);
break;
}
}
}
void history_impl_t::get_history(std::vector<wcstring> &result) {
// If we have a pending item, we skip the first encountered (i.e. last) new item.
bool next_is_pending = this->has_pending_item;
std::unordered_set<wcstring> seen;
// Append new items.
for (auto iter = new_items.crbegin(); iter < new_items.crend(); ++iter) {
// Skip a pending item if we have one.
if (next_is_pending) {
next_is_pending = false;
continue;
}
if (seen.insert(iter->str()).second) result.push_back(iter->str());
}
// Append old items.
load_old_if_needed();
for (auto iter = old_item_offsets.crbegin(); iter != old_item_offsets.crend(); ++iter) {
size_t offset = *iter;
const history_item_t item = file_contents->decode_item(offset);
if (seen.insert(item.str()).second) result.push_back(item.str());
}
}
size_t history_impl_t::size() {
size_t new_item_count = new_items.size();
if (this->has_pending_item && new_item_count > 0) new_item_count -= 1;
load_old_if_needed();
size_t old_item_count = old_item_offsets.size();
return new_item_count + old_item_count;
}
/// \return the history item at one-based index \p idx (1 is the most recent), or an empty
/// item if \p idx is past the end of history.
history_item_t history_impl_t::item_at_index(size_t idx) {
    // 0 is considered an invalid index.
    assert(idx > 0);
    // Convert to a 0-based offset counted back from the most recent resolved item.
    size_t offset_from_end = idx - 1;

    // Determine how many "resolved" (non-pending) items we have; at most one item may be
    // pending, and it is always the last one.
    size_t resolved_count = new_items.size();
    if (this->has_pending_item && resolved_count > 0) resolved_count -= 1;

    // offset_from_end == 0 corresponds to the last resolved new item.
    if (offset_from_end < resolved_count) {
        return new_items.at(resolved_count - offset_from_end - 1);
    }

    // Now look in our old items.
    offset_from_end -= resolved_count;
    load_old_if_needed();
    const size_t old_count = old_item_offsets.size();
    if (offset_from_end < old_count) {
        // offset_from_end == 0 corresponds to the last item in old_item_offsets.
        size_t file_offset = old_item_offsets.at(old_count - offset_from_end - 1);
        return file_contents->decode_item(file_offset);
    }

    // Index past the valid range, so return an empty history item.
    return history_item_t{};
}
std::unordered_map<long, wcstring> history_impl_t::items_at_indexes(const std::vector<long> &idxs) {
std::unordered_map<long, wcstring> result;
for (long idx : idxs) {
if (idx <= 0) {
// Skip non-positive entries.
continue;
}
// Insert an empty string to see if this is the first time the index is encountered. If so,
// we have to go fetch the item.
auto iter_inserted = result.emplace(idx, wcstring{});
if (iter_inserted.second) {
// New key.
auto item = item_at_index(size_t(idx));
iter_inserted.first->second = std::move(item.contents);
}
}
return result;
}
/// \return a timestamp for a new item, never exactly equal to the boundary timestamp.
time_t history_impl_t::timestamp_now() const {
    const time_t now = time(nullptr);
    // Big hack: do not allow timestamps equal to our boundary date. This is because we include
    // items whose timestamps are equal to our boundary when reading old history, so we can catch
    // "just closed" items. But this means that we may interpret our own items, that we just wrote,
    // as old items, if we wrote them in the same second as our birthdate.
    return (now == this->boundary_timestamp) ? now + 1 : now;
}
/// Rebuild old_item_offsets by scanning file_contents for items at or before our boundary
/// timestamp.
void history_impl_t::populate_from_file_contents() {
    old_item_offsets.clear();
    if (file_contents) {
        // Walk the file, remembering the offset of each old item.
        size_t cursor = 0;
        for (;;) {
            maybe_t<size_t> offset =
                file_contents->offset_of_next_item(&cursor, boundary_timestamp);
            if (!offset.has_value()) break;
            old_item_offsets.push_back(*offset);
        }
    }
    FLOGF(history, "Loaded %lu old items", old_item_offsets.size());
}
/// Open and index the on-disk history file, if we have not already done so. Sets loaded_old
/// first so we never retry on failure within this session.
void history_impl_t::load_old_if_needed() {
    if (loaded_old) return;
    loaded_old = true;

    time_profiler_t profiler("load_old");  //!OCLINT(side-effect)
    if (maybe_t<wcstring> filename = history_filename(name)) {
        autoclose_fd_t file{wopen_cloexec(*filename, O_RDONLY)};
        int fd = file.fd();
        if (fd >= 0) {
            // Take a read lock to guard against someone else appending. This is released after
            // getting the file's length. We will read the file after releasing the lock, but that's
            // not a problem, because we never modify already written data. In short, the purpose of
            // this lock is to ensure we don't see the file size change mid-update.
            //
            // We may fail to lock (e.g. on lockless NFS - see issue #685. In that case, we proceed
            // as if it did not fail. The risk is that we may get an incomplete history item; this
            // is unlikely because we only treat an item as valid if it has a terminating newline.
            bool locked = maybe_lock_file(fd, LOCK_SH);
            file_contents = history_file_contents_t::create(fd);
            this->history_file_id = file_contents ? file_id_for_fd(fd) : kInvalidFileID;
            if (locked) unlock_file(fd);

            time_profiler_t profiler("populate_from_file_contents");  //!OCLINT(side-effect)
            this->populate_from_file_contents();
        }
    }
}
/// Advance the search one step in \p direction to the next matching item.
/// \return true if a match was found (it becomes the current item and index), false if we ran
/// off the end of history first.
bool history_search_t::go_to_next_match(history_search_direction_t direction) {
    // Backwards means increasing our index.
    // We step the index by `increment` until it reaches `invalid_index`, the sentinel one step
    // past the valid range in the chosen direction. Stepping forward past index 1 yields 0;
    // stepping backward relies on item_at_index returning an empty item past the end.
    size_t invalid_index;
    ssize_t increment;
    if (direction == history_search_direction_t::backward) {
        invalid_index = static_cast<size_t>(-1);
        increment = 1;
    } else {
        assert(direction == history_search_direction_t::forward);
        invalid_index = 0;
        increment = -1;
    }

    // Nothing to do if we are already at the sentinel.
    if (current_index_ == invalid_index) return false;

    size_t index = current_index_;
    while ((index += increment) != invalid_index) {
        history_item_t item = history_->item_at_index(index);
        // We're done if it's empty or we cancelled.
        if (item.empty()) {
            return false;
        }
        // Look for an item that matches and (if deduping) that we haven't seen before.
        if (!item.matches_search(canon_term_, search_type_, !ignores_case())) {
            continue;
        }
        // Skip if deduplicating.
        if (dedup() && !deduper_.insert(item.str()).second) {
            continue;
        }
        // This is our new item.
        current_item_ = std::move(item);
        current_index_ = index;
        return true;
    }
    return false;
}
/// \return the item located by the most recent successful search. Asserts if there is none.
const history_item_t &history_search_t::current_item() const {
    assert(current_item_ && "No current item");
    return *current_item_;
}

/// \return the text of the current item.
const wcstring &history_search_t::current_string() const { return this->current_item().str(); }

/// \return the history index of the current item.
size_t history_search_t::current_index() const { return this->current_index_; }
/// Forget everything we know about the on-disk file, forcing a reload on next access.
void history_impl_t::clear_file_state() {
    // Erase everything we know about our file.
    file_contents.reset();
    old_item_offsets.clear();
    loaded_old = false;
}
/// Drop duplicated new items, keeping only the most recent item with any given text.
void history_impl_t::compact_new_items() {
    // Iterate newest-to-oldest so the first occurrence we encounter is the one we keep.
    std::unordered_set<wcstring> seen;
    for (size_t idx = new_items.size(); idx-- > 0;) {
        const history_item_t &item = new_items[idx];
        // Only compact persisted items.
        if (!item.should_write_to_disk()) continue;
        if (seen.insert(item.contents).second) continue;

        // A newer copy of this text exists, so delete the item at this index.
        new_items.erase(new_items.begin() + idx);
        if (idx < first_unwritten_new_item_index) {
            // Decrement first_unwritten_new_item_index if we are deleting a previously written
            // item.
            first_unwritten_new_item_index--;
        }
    }
}
/// Remove trailing ephemeral items. Ephemeral items may only be retrieved immediately after
/// being added; adding any other item discards them.
void history_impl_t::remove_ephemeral_items() {
    while (!new_items.empty() &&
           new_items.back().persist_mode == history_persistence_mode_t::ephemeral) {
        new_items.pop_back();
    }
    // Keep the first-unwritten index in bounds in case we popped counted items.
    first_unwritten_new_item_index = std::min(first_unwritten_new_item_index, new_items.size());
}
// Given the fd of an existing history file, or -1 if none, write
// a new history file to temp_fd. Returns true on success, false
// on error
bool history_impl_t::rewrite_to_temporary_file(int existing_fd, int dst_fd) const {
    // We are reading FROM existing_fd and writing TO dst_fd
    // dst_fd must be valid; existing_fd does not need to be
    assert(dst_fd >= 0);

    // Make an LRU cache to save only the last N elements.
    history_lru_cache_t lru(HISTORY_SAVE_MAX);

    // Read in existing items (which may have changed out from underneath us, so don't trust our
    // old file contents).
    if (auto local_file = history_file_contents_t::create(existing_fd)) {
        size_t cursor = 0;
        maybe_t<size_t> offset;
        while ((offset = local_file->offset_of_next_item(&cursor, 0)).has_value()) {
            // Try decoding an old item.
            history_item_t old_item = local_file->decode_item(*offset);

            // If old item is newer than session always erase if in deleted.
            if (old_item.timestamp() > boundary_timestamp) {
                if (old_item.empty() || deleted_items.count(old_item.str()) > 0) {
                    continue;
                }
                lru.add_item(std::move(old_item));
            } else {
                // If old item is older and in deleted items don't erase if added by
                // clear_session.
                if (old_item.empty() || (deleted_items.count(old_item.str()) > 0 &&
                                         !deleted_items.at(old_item.str()))) {
                    continue;
                }
                // Add this old item.
                lru.add_item(std::move(old_item));
            }
        }
    }

    // Insert any unwritten new items
    for (auto iter = new_items.cbegin() + this->first_unwritten_new_item_index;
         iter != new_items.cend(); ++iter) {
        if (iter->should_write_to_disk()) {
            lru.add_item(*iter);
        }
    }

    // Stable-sort our items by timestamp
    // This is because we may have read "old" items with a later timestamp than our "new" items
    // This is the essential step that roughly orders items by history
    lru.stable_sort([](const history_item_t &item1, const history_item_t &item2) {
        return item1.timestamp() < item2.timestamp();
    });

    // Write them out.
    int err = 0;
    std::string buffer;
    buffer.reserve(HISTORY_OUTPUT_BUFFER_SIZE + 128);
    // Iterate by const reference: iterating by value would copy each cache entry (including
    // its command text) once per item.
    for (const auto &key_item : lru) {
        append_history_item_to_buffer(key_item.second, &buffer);
        err = flush_to_fd(&buffer, dst_fd, HISTORY_OUTPUT_BUFFER_SIZE);
        if (err) break;
    }
    if (!err) {
        // Flush whatever remains below the buffering threshold.
        err = flush_to_fd(&buffer, dst_fd, 0);
    }
    if (err) {
        FLOGF(history_file, L"Error %d when writing to temporary history file", err);
    }
    return err == 0;
}
// Returns the fd of an opened temporary file, or an invalid fd on failure.
static autoclose_fd_t create_temporary_file(const wcstring &name_template, wcstring *out_path) {
    for (int attempt = 0; attempt < 10; attempt++) {
        // mkstemp mutates its argument, so build a fresh narrow copy of the template each try.
        std::string candidate = wcs2zstring(name_template);
        autoclose_fd_t fd{fish_mkstemp_cloexec(&candidate[0])};
        if (fd.valid()) {
            *out_path = str2wcstring(candidate);
            return fd;
        }
    }
    return autoclose_fd_t{};
}
/// Save history by rewriting the whole file: speculatively write a temporary file, then take
/// the lock and atomically rename it over the target, retrying if the target changed while we
/// were writing. \return true if the rewrite completed, false otherwise.
bool history_impl_t::save_internal_via_rewrite() {
    FLOGF(history, "Saving %lu items via rewrite",
          new_items.size() - first_unwritten_new_item_index);

    // We want to rewrite the file, while holding the lock for as briefly as possible
    // To do this, we speculatively write a file, and then lock and see if our original file changed
    // Repeat until we succeed or give up
    const maybe_t<wcstring> possibly_indirect_target_name = history_filename(name);
    const maybe_t<wcstring> tmp_name_template = history_filename(name, L".XXXXXX");
    if (!possibly_indirect_target_name.has_value() || !tmp_name_template.has_value()) {
        return false;
    }

    // If the history file is a symlink, we want to rewrite the real file so long as we can find it.
    wcstring target_name;
    if (auto maybe_real_path = wrealpath(*possibly_indirect_target_name)) {
        target_name = *maybe_real_path;
    } else {
        target_name = *possibly_indirect_target_name;
    }

    // Make our temporary file
    // Remember that we have to close this fd!
    wcstring tmp_name;
    autoclose_fd_t tmp_file = create_temporary_file(*tmp_name_template, &tmp_name);
    if (!tmp_file.valid()) {
        return false;
    }
    const int tmp_fd = tmp_file.fd();
    bool done = false;
    for (int i = 0; i < max_save_tries && !done; i++) {
        // Open any target file, but do not lock it right away
        autoclose_fd_t target_fd_before{
            wopen_cloexec(target_name, O_RDONLY | O_CREAT, history_file_mode)};
        file_id_t orig_file_id = file_id_for_fd(target_fd_before.fd());  // possibly invalid
        bool wrote = this->rewrite_to_temporary_file(target_fd_before.fd(), tmp_fd);
        target_fd_before.close();
        if (!wrote) {
            // Failed to write, no good
            break;
        }

        // The crux! We rewrote the history file; see if the history file changed while we
        // were rewriting it. Make an effort to take the lock before checking, to avoid racing.
        // If the open fails, then proceed; this may be because there is no current history
        file_id_t new_file_id = kInvalidFileID;
        autoclose_fd_t target_fd_after{wopen_cloexec(target_name, O_RDONLY)};
        if (target_fd_after.valid()) {
            // critical to take the lock before checking file IDs,
            // and hold it until after we are done replacing.
            // Also critical to check the file at the path, NOT based on our fd.
            // It's only OK to replace the file while holding the lock.
            // Note any lock is released when target_fd_after is closed.
            (void)maybe_lock_file(target_fd_after.fd(), LOCK_EX);
            new_file_id = file_id_for_path(target_name);
        }
        bool can_replace_file = (new_file_id == orig_file_id || new_file_id == kInvalidFileID);
        if (!can_replace_file) {
            // The file has changed, so we're going to re-read it
            // Truncate our tmp_fd so we can reuse it
            if (ftruncate(tmp_fd, 0) == -1 || lseek(tmp_fd, 0, SEEK_SET) == -1) {
                FLOGF(history_file, L"Error %d when truncating temporary history file", errno);
            }
        } else {
            // The file is unchanged, or the new file doesn't exist or we can't read it
            // We also attempted to take the lock, so we feel confident in replacing it

            // Ensure we maintain the ownership and permissions of the original (#2355). If the
            // stat fails, we assume (hope) our default permissions are correct. This
            // corresponds to e.g. someone running sudo -E as the very first command. If they
            // did, it would be tricky to set the permissions correctly. (bash doesn't get this
            // case right either).
            struct stat sbuf;
            if (target_fd_after.valid() && fstat(target_fd_after.fd(), &sbuf) >= 0) {
                if (fchown(tmp_fd, sbuf.st_uid, sbuf.st_gid) == -1) {
                    FLOGF(history_file, L"Error %d when changing ownership of history file", errno);
                }
                if (fchmod(tmp_fd, sbuf.st_mode) == -1) {
                    FLOGF(history_file, L"Error %d when changing mode of history file", errno);
                }
            }

            // Slide it into place
            if (wrename(tmp_name, target_name) == -1) {
                const char *error = std::strerror(errno);
                FLOGF(error, _(L"Error when renaming history file: %s"), error);
            }

            // We did it
            done = true;
        }
    }

    // Ensure we never leave the old file around
    wunlink(tmp_name);

    if (done) {
        // We've saved everything, so we have no more unsaved items.
        this->first_unwritten_new_item_index = new_items.size();

        // We deleted our deleted items.
        this->deleted_items.clear();

        // Our history has been written to the file, so clear our state so we can re-reference the
        // file.
        this->clear_file_state();
    }

    // Fix: report the actual outcome. The previous code returned a local `ok` flag that was
    // initialized to false and never updated, so the rewrite always reported failure even when
    // the rename succeeded.
    return done;
}
// Saves our unwritten history items by appending them to the existing history file.
// Returns true on success, false on failure.
bool history_impl_t::save_internal_via_appending() {
FLOGF(history, "Saving %lu items via appending",
new_items.size() - first_unwritten_new_item_index);
// No deleting allowed.
assert(deleted_items.empty());
bool ok = false;
// If the file is different (someone vacuumed it) then we need to update our mmap.
bool file_changed = false;
// Get the path to the real history file.
maybe_t<wcstring> maybe_history_path = history_filename(name);
if (!maybe_history_path) {
return true;
}
wcstring history_path = maybe_history_path.acquire();
// We are going to open the file, lock it, append to it, and then close it
// After locking it, we need to stat the file at the path; if there is a new file there, it
// means the file was replaced and we have to try again.
// Limit our max tries so we don't do this forever.
autoclose_fd_t history_fd{};
for (int i = 0; i < max_save_tries; i++) {
autoclose_fd_t fd{wopen_cloexec(history_path, O_WRONLY | O_APPEND)};
if (!fd.valid()) {
// can't open, we're hosed
break;
}
// Exclusive lock on the entire file. This is released when we close the file (below). This
// may fail on (e.g.) lockless NFS. If so, proceed as if it did not fail; the risk is that
// we may get interleaved history items, which is considered better than no history, or
// forcing everything through the slow copy-move mode. We try to minimize this possibility
// by writing with O_APPEND.
maybe_lock_file(fd.fd(), LOCK_EX);
const file_id_t file_id = file_id_for_fd(fd.fd());
if (file_id_for_path(history_path) == file_id) {
// File IDs match, so the file we opened is still at that path
// We're going to use this fd
if (file_id != this->history_file_id) {
file_changed = true;
}
history_fd = std::move(fd);
break;
}
}
if (history_fd.valid()) {
// We (hopefully successfully) took the exclusive lock. Append to the file.
// Note that this is sketchy for a few reasons:
// - Another shell may have appended its own items with a later timestamp, so our file may
// no longer be sorted by timestamp.
// - Another shell may have appended the same items, so our file may now contain
// duplicates.
//
// We cannot modify any previous parts of our file, because other instances may be reading
// those portions. We can only append.
//
// Originally we always rewrote the file on saving, which avoided both of these problems.
// However, appending allows us to save history after every command, which is nice!
//
// Periodically we "clean up" the file by rewriting it, so that most of the time it doesn't
// have duplicates, although we don't yet sort by timestamp (the timestamp isn't really used
// for much anyways).
// So far so good. Write all items at or after first_unwritten_new_item_index. Note that we
// write even a pending item - pending items are ignored by history within the command
// itself, but should still be written to the file.
// TODO: consider filling the buffer ahead of time, so we can just lock, splat, and unlock?
int err = 0;
// Use a small buffer size for appending, we usually only have 1 item
std::string buffer;
while (first_unwritten_new_item_index < new_items.size()) {
const history_item_t &item = new_items.at(first_unwritten_new_item_index);
if (item.should_write_to_disk()) {
append_history_item_to_buffer(item, &buffer);
err = flush_to_fd(&buffer, history_fd.fd(), HISTORY_OUTPUT_BUFFER_SIZE);
if (err) break;
}
// We wrote or skipped this item, hooray.
first_unwritten_new_item_index++;
}