-
-
Notifications
You must be signed in to change notification settings - Fork 4.2k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Add support for wheel zoom renderers under the cursor #13826
base: branch-3.5
Are you sure you want to change the base?
Conversation
5d150d2
to
24766ee
Compare
6c2eb13
to
2d881cc
Compare
This is tentatively ready (tests are still needed). I implemented the feature request from #13728 (comment) in |
2d881cc
to
baa1f11
Compare
One possible idea: adjust the |
I'm currently implementing grouping behavior as part of |
ok thank you! As you suggested, I think something explicit like |
baa1f11
to
b891efc
Compare
@droumis, the new behavior has landed. See the updated example and screen cast. |
Alternatively to `GroupByModels`:
diff --git a/examples/interaction/tools/subcoordinates_zoom.py b/examples/interaction/tools/subcoordinates_zoom.py
index 971831bff2..5f8d6d4f8b 100644
--- a/examples/interaction/tools/subcoordinates_zoom.py
+++ b/examples/interaction/tools/subcoordinates_zoom.py
@@ -4,7 +4,7 @@ from bokeh.core.properties import field
from bokeh.io import show
from bokeh.layouts import column, row
from bokeh.models import (ColumnDataSource, CustomJS, Div, FactorRange,
- GroupByModels, HoverTool, Range1d, Switch,
+ GroupByName, HoverTool, Range1d, Switch,
WheelZoomTool, ZoomInTool, ZoomOutTool)
from bokeh.palettes import Category10
from bokeh.plotting import figure
@@ -41,15 +41,13 @@ for i, channel in enumerate(channels):
source.data[channel] = data[i]
line = xy.line(field("time"), field(channel), color=Category10[10][i], source=source, name=channel)
+ line.name = "even" if i % 2 == 0 else "odd"
renderers.append(line)
level = 1
hit_test = False
-even_renderers = [ r for i, r in enumerate(renderers) if i % 2 == 0 ]
-odd_renderers = [ r for i, r in enumerate(renderers) if i % 2 == 1 ]
-
-behavior = GroupByModels(groups=[even_renderers, odd_renderers])
+behavior = GroupByName()
ywheel_zoom = WheelZoomTool(renderers=renderers, level=level, hit_test=hit_test, hit_test_mode="hline", hit_test_behavior=behavior, dimensions="height")
xwheel_zoom = WheelZoomTool(renderers=renderers, level=level, hit_test=hit_test, hit_test_mode="hline", hit_test_behavior=behavior, dimensions="width") |
b891efc
to
442b796
Compare
NICE! 👏 Code:
from bokeh.layouts import column
# Demo: sub-coordinate wheel zoom with hit-testing and grouped renderers.
#
# Plots synthetic EEG and POS channel traces, each on its own y
# sub-coordinate band, and configures wheel/zoom tools whose hit-test
# behavior (GroupByModels) zooms renderers as two groups: all EEG traces
# together, all POS traces together.
# Run with: bokeh serve <this file>
import numpy as np

from bokeh.core.properties import field
from bokeh.layouts import column, row
from bokeh.models import (ColumnDataSource, CustomJS, Div, FactorRange,
                          GroupByModels, HoverTool, Range1d, Switch,
                          WheelZoomTool, ZoomInTool, ZoomOutTool)
from bokeh.palettes import Category10
from bokeh.plotting import curdoc, figure

# Synthetic data: one random walk per channel.
n_eeg_channels = 7
n_pos_channels = 3
n_channels = n_eeg_channels + n_pos_channels
n_seconds = 15
total_samples = 512*n_seconds
time = np.linspace(0, n_seconds, total_samples)
data = np.random.randn(n_channels, total_samples).cumsum(axis=1)
channels = [f"EEG {i}" for i in range(n_eeg_channels)] + [f"POS {i}" for i in range(n_pos_channels)]

hover = HoverTool(tooltips=[
    ("Channel", "$name"),
    ("Time", "$x s"),
    ("Amplitude", "$y μV"),
])

x_range = Range1d(start=time.min(), end=time.max())
y_range = FactorRange(factors=channels)
p = figure(x_range=x_range, y_range=y_range, lod_threshold=None, tools="pan,reset,xcrosshair")

source = ColumnDataSource(data=dict(time=time))

# One line renderer per channel, each drawn on its own sub-coordinate
# system mapped to the y band [i, i + 1]; EEG traces are black, POS
# traces take palette colors.
eeg_renderers = []
pos_renderers = []
for i, channel in enumerate(channels):
    is_eeg = channel.startswith('EEG')
    xy = p.subplot(
        x_source=p.x_range,
        y_source=Range1d(start=data[i].min(), end=data[i].max()),
        x_target=p.x_range,
        y_target=Range1d(start=i, end=i + 1),
    )
    source.data[channel] = data[i]
    if is_eeg:
        line = xy.line(field("time"), field(channel), color='black', source=source, name=channel)
        eeg_renderers.append(line)
    else:
        line = xy.line(field("time"), field(channel), color=Category10[10][i], source=source, name=channel)
        pos_renderers.append(line)

all_renderers = eeg_renderers + pos_renderers

# level=1 zooms the sub-coordinates rather than the frame; with hit_test
# enabled, the y wheel zoom acts only on renderers under the cursor
# (hline hit-test mode), expanded to their whole group by GroupByModels.
level = 1
hit_test = True
behavior = GroupByModels(groups=[eeg_renderers, pos_renderers])
ywheel_zoom = WheelZoomTool(renderers=all_renderers, level=level, hit_test=hit_test, hit_test_mode="hline", hit_test_behavior=behavior, dimensions="height")
xwheel_zoom = WheelZoomTool(renderers=all_renderers, level=level, dimensions="width")
zoom_in = ZoomInTool(renderers=all_renderers, level=level, dimensions="height")
zoom_out = ZoomOutTool(renderers=all_renderers, level=level, dimensions="height")

p.add_tools(ywheel_zoom, xwheel_zoom, zoom_in, zoom_out, hover)
p.toolbar.active_scroll = ywheel_zoom

# Runtime toggles: switch the zoom level (frame vs. sub-coordinates) and
# the hit-testing behavior from the served document.
level_switch = Switch(active=level == 1)
level_switch.js_on_change("active", CustomJS(
    args=dict(tools=[ywheel_zoom, zoom_in, zoom_out]),
    code="""
export default ({tools}, obj) => {
const level = obj.active ? 1 : 0
for (const tool of tools) {
tool.level = level
}
}
"""))

hit_test_switch = Switch(active=hit_test)
hit_test_switch.js_on_change("active", CustomJS(
    args=dict(tool=ywheel_zoom),
    code="""
export default ({tool}, obj) => {
tool.hit_test = obj.active
}
"""))

layout = column(
    row(Div(text="Zoom sub-coordinates:"), level_switch),
    row(Div(text="Zoom hit-tested:"), hit_test_switch),
    p,
)
curdoc().add_root(layout)
This allows zooming only the renderers that were hit-tested under the position of the device pointer, e.g.:
From examples/interaction/tools/subcoordinates_zoom example; showing grouping behavior (even/odd):
Screencast.from.07.06.2024.11.26.39.webm
fixes #13728